Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
48 changes: 39 additions & 9 deletions extension/runner_util/inputs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -86,17 +86,47 @@ Result<BufferCleanup> prepare_input_tensors(
Debug, "Verifying and setting input for non-tensor input %zu", i);

if (tag.get() == Tag::Int) {
int64_t int_input;
std::memcpy(&int_input, buffer, buffer_size);
err = method.set_input(runtime::EValue(int_input), i);
if (buffer_size != sizeof(int64_t)) {
ET_LOG(
Error,
"Int input at index %zu has size %zu, expected sizeof(int64_t) %zu",
i,
buffer_size,
sizeof(int64_t));
err = Error::InvalidArgument;
} else {
int64_t int_input;
std::memcpy(&int_input, buffer, buffer_size);
err = method.set_input(runtime::EValue(int_input), i);
}
} else if (tag.get() == Tag::Double) {
double double_input;
std::memcpy(&double_input, buffer, buffer_size);
err = method.set_input(runtime::EValue(double_input), i);
if (buffer_size != sizeof(double)) {
ET_LOG(
Error,
"Double input at index %zu has size %zu, expected sizeof(double) %zu",
i,
buffer_size,
sizeof(double));
err = Error::InvalidArgument;
} else {
double double_input;
std::memcpy(&double_input, buffer, buffer_size);
err = method.set_input(runtime::EValue(double_input), i);
}
} else if (tag.get() == Tag::Bool) {
bool bool_input;
std::memcpy(&bool_input, buffer, buffer_size);
err = method.set_input(runtime::EValue(bool_input), i);
if (buffer_size != sizeof(bool)) {
ET_LOG(
Error,
"Bool input at index %zu has size %zu, expected sizeof(bool) %zu",
i,
buffer_size,
sizeof(bool));
err = Error::InvalidArgument;
} else {
bool bool_input;
std::memcpy(&bool_input, buffer, buffer_size);
err = method.set_input(runtime::EValue(bool_input), i);
}
} else {
ET_LOG(
Error,
Expand Down
9 changes: 7 additions & 2 deletions extension/runner_util/test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,17 +19,22 @@ include(${EXECUTORCH_ROOT}/tools/cmake/Test.cmake)

add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleIntBool.pte"
COMMAND ${PYTHON_EXECUTABLE} -m test.models.export_program --modules
"ModuleAdd" --outdir "${CMAKE_CURRENT_BINARY_DIR}"
"ModuleAdd,ModuleIntBool" --outdir "${CMAKE_CURRENT_BINARY_DIR}"
WORKING_DIRECTORY ${EXECUTORCH_ROOT}
)

add_custom_target(
executorch_runner_util_test_resources
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte"
"${CMAKE_CURRENT_BINARY_DIR}/ModuleIntBool.pte"
)

set(test_env "ET_MODULE_ADD_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte")
set(test_env
"ET_MODULE_ADD_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleAdd.pte"
"ET_MODULE_INTBOOL_PATH=${CMAKE_CURRENT_BINARY_DIR}/ModuleIntBool.pte"
)

set(_test_srcs inputs_test.cpp)

Expand Down
192 changes: 164 additions & 28 deletions extension/runner_util/test/inputs_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@

#include <executorch/extension/runner_util/inputs.h>

#include <cstdlib>
#include <cstring>

#include <executorch/extension/data_loader/file_data_loader.h>
#include <executorch/runtime/core/exec_aten/exec_aten.h>
#include <executorch/runtime/core/span.h>
Expand Down Expand Up @@ -40,52 +43,81 @@ class InputsTest : public ::testing::Test {
void SetUp() override {
torch::executor::runtime_init();

// Create a loader for the serialized ModuleAdd program.
const char* path = std::getenv("ET_MODULE_ADD_PATH");
Result<FileDataLoader> loader = FileDataLoader::from(path);
ASSERT_EQ(loader.error(), Error::Ok);
loader_ = std::make_unique<FileDataLoader>(std::move(loader.get()));
// Load ModuleAdd
const char* add_path = std::getenv("ET_MODULE_ADD_PATH");
ASSERT_NE(add_path, nullptr)
<< "ET_MODULE_ADD_PATH environment variable must be set";
Result<FileDataLoader> add_loader = FileDataLoader::from(add_path);
ASSERT_EQ(add_loader.error(), Error::Ok);
add_loader_ = std::make_unique<FileDataLoader>(std::move(add_loader.get()));

Result<Program> add_program = Program::load(
add_loader_.get(), Program::Verification::InternalConsistency);
ASSERT_EQ(add_program.error(), Error::Ok);
add_program_ = std::make_unique<Program>(std::move(add_program.get()));

add_mmm_ = std::make_unique<ManagedMemoryManager>(
/*planned_memory_bytes=*/32 * 1024U,
/*method_allocator_bytes=*/32 * 1024U);

Result<Method> add_method =
add_program_->load_method("forward", &add_mmm_->get());
ASSERT_EQ(add_method.error(), Error::Ok);
add_method_ = std::make_unique<Method>(std::move(add_method.get()));

// Load ModuleIntBool
const char* intbool_path = std::getenv("ET_MODULE_INTBOOL_PATH");
ASSERT_NE(intbool_path, nullptr)
<< "ET_MODULE_INTBOOL_PATH environment variable must be set";
Result<FileDataLoader> intbool_loader = FileDataLoader::from(intbool_path);
ASSERT_EQ(intbool_loader.error(), Error::Ok);
intbool_loader_ =
std::make_unique<FileDataLoader>(std::move(intbool_loader.get()));

// Use it to load the program.
Result<Program> program = Program::load(
loader_.get(), Program::Verification::InternalConsistency);
ASSERT_EQ(program.error(), Error::Ok);
program_ = std::make_unique<Program>(std::move(program.get()));
Result<Program> intbool_program = Program::load(
intbool_loader_.get(), Program::Verification::InternalConsistency);
ASSERT_EQ(intbool_program.error(), Error::Ok);
intbool_program_ =
std::make_unique<Program>(std::move(intbool_program.get()));

mmm_ = std::make_unique<ManagedMemoryManager>(
intbool_mmm_ = std::make_unique<ManagedMemoryManager>(
/*planned_memory_bytes=*/32 * 1024U,
/*method_allocator_bytes=*/32 * 1024U);

// Load the forward method.
Result<Method> method = program_->load_method("forward", &mmm_->get());
ASSERT_EQ(method.error(), Error::Ok);
method_ = std::make_unique<Method>(std::move(method.get()));
Result<Method> intbool_method =
intbool_program_->load_method("forward", &intbool_mmm_->get());
ASSERT_EQ(intbool_method.error(), Error::Ok);
intbool_method_ = std::make_unique<Method>(std::move(intbool_method.get()));
}

private:
// Must outlive method_, but tests shouldn't need to touch them.
std::unique_ptr<FileDataLoader> loader_;
std::unique_ptr<ManagedMemoryManager> mmm_;
std::unique_ptr<Program> program_;
std::unique_ptr<FileDataLoader> add_loader_;
std::unique_ptr<Program> add_program_;
std::unique_ptr<ManagedMemoryManager> add_mmm_;

std::unique_ptr<FileDataLoader> intbool_loader_;
std::unique_ptr<Program> intbool_program_;
std::unique_ptr<ManagedMemoryManager> intbool_mmm_;

protected:
std::unique_ptr<Method> method_;
std::unique_ptr<Method> add_method_;
std::unique_ptr<Method> intbool_method_;
};

TEST_F(InputsTest, Smoke) {
Result<BufferCleanup> input_buffers = prepare_input_tensors(*method_);
Result<BufferCleanup> input_buffers = prepare_input_tensors(*add_method_);
ASSERT_EQ(input_buffers.error(), Error::Ok);
auto input_err = method_->set_input(executorch::runtime::EValue(1.0), 2);
auto input_err = add_method_->set_input(executorch::runtime::EValue(1.0), 2);
ASSERT_EQ(input_err, Error::Ok);

// We can't look at the input tensors, but we can check that the outputs make
// sense after executing the method.
Error status = method_->execute();
Error status = add_method_->execute();
ASSERT_EQ(status, Error::Ok);

// Get the single output, which should be a floating-point Tensor.
ASSERT_EQ(method_->outputs_size(), 1);
const EValue& output_value = method_->get_output(0);
ASSERT_EQ(add_method_->outputs_size(), 1);
const EValue& output_value = add_method_->get_output(0);
ASSERT_EQ(output_value.tag, Tag::Tensor);
Tensor output = output_value.toTensor();
ASSERT_EQ(output.scalar_type(), ScalarType::Float);
Expand All @@ -107,14 +139,14 @@ TEST_F(InputsTest, ExceedingInputCountLimitFails) {
// The smoke test above demonstrated that we can prepare inputs with the
// default limits. It should fail if we lower the max below the number of
// actual inputs.
MethodMeta method_meta = method_->method_meta();
MethodMeta method_meta = add_method_->method_meta();
size_t num_inputs = method_meta.num_inputs();
ASSERT_GE(num_inputs, 1);
executorch::extension::PrepareInputTensorsOptions options;
options.max_inputs = num_inputs - 1;

Result<BufferCleanup> input_buffers =
prepare_input_tensors(*method_, options);
prepare_input_tensors(*add_method_, options);
ASSERT_NE(input_buffers.error(), Error::Ok);
}

Expand All @@ -128,7 +160,7 @@ TEST_F(InputsTest, ExceedingInputAllocationLimitFails) {
options.max_total_allocation_size = 1;

Result<BufferCleanup> input_buffers =
prepare_input_tensors(*method_, options);
prepare_input_tensors(*add_method_, options);
ASSERT_NE(input_buffers.error(), Error::Ok);
}

Expand Down Expand Up @@ -186,3 +218,107 @@ TEST(BufferCleanupTest, Smoke) {
// complaint.
bc2.reset();
}

// Verifies that prepare_input_tensors() rejects a Double (alpha) input whose
// backing buffer is larger than sizeof(double) instead of overflowing it.
TEST_F(InputsTest, DoubleInputWrongSizeFails) {
  MethodMeta meta = add_method_->method_meta();

  // ModuleAdd takes three inputs: (tensor, tensor, double alpha).
  ASSERT_EQ(meta.num_inputs(), 3);

  // The third input must be the Double scalar.
  auto alpha_tag = meta.input_tag(2);
  ASSERT_TRUE(alpha_tag.ok());
  ASSERT_EQ(alpha_tag.get(), Tag::Double);

  // Correctly-sized, zero-filled buffers for the two tensor inputs.
  auto meta0 = meta.input_tensor_meta(0);
  auto meta1 = meta.input_tensor_meta(1);
  ASSERT_TRUE(meta0.ok());
  ASSERT_TRUE(meta1.ok());
  std::vector<char> tensor_buf0(meta0->nbytes(), 0);
  std::vector<char> tensor_buf1(meta1->nbytes(), 0);

  // ModuleAdd expects alpha=1.0. Write the valid value first so that, were
  // the size check missing, set_input() validation would not mask the
  // buffer overflow.
  const double alpha = 1.0;
  char oversized[16];  // sizeof(double) is 8; 16 bytes trips the size check.
  memcpy(oversized, &alpha, sizeof(double));

  std::vector<std::pair<char*, size_t>> input_buffers = {
      {tensor_buf0.data(), tensor_buf0.size()},
      {tensor_buf1.data(), tensor_buf1.size()},
      {oversized, sizeof(oversized)},
  };

  Result<BufferCleanup> result =
      prepare_input_tensors(*add_method_, {}, input_buffers);
  EXPECT_EQ(result.error(), Error::InvalidArgument);
}

// Verifies that prepare_input_tensors() rejects Int and Bool scalar inputs
// whose backing buffers do not match sizeof(int64_t) / sizeof(bool).
TEST_F(InputsTest, IntBoolInputWrongSizeFails) {
  MethodMeta meta = intbool_method_->method_meta();

  // ModuleIntBool takes three inputs: (tensor, int, bool).
  ASSERT_EQ(meta.num_inputs(), 3);

  // Confirm the scalar input tags before exercising the size checks.
  auto int_tag = meta.input_tag(1);
  ASSERT_TRUE(int_tag.ok());
  ASSERT_EQ(int_tag.get(), Tag::Int);

  auto bool_tag = meta.input_tag(2);
  ASSERT_TRUE(bool_tag.ok());
  ASSERT_EQ(bool_tag.get(), Tag::Bool);

  // Correctly-sized, zero-filled buffer for the tensor input.
  auto tensor_meta = meta.input_tensor_meta(0);
  ASSERT_TRUE(tensor_meta.ok());
  std::vector<char> tensor_buf(tensor_meta->nbytes(), 0);

  // Valid scalar values to copy into the test buffers.
  const int64_t y = 1;
  const bool z = true;

  // Case 1: an oversized buffer for the Int input must be rejected.
  {
    char oversized_int[16];  // sizeof(int64_t) is 8.
    memcpy(oversized_int, &y, sizeof(int64_t));

    char bool_buf[sizeof(bool)];
    memcpy(bool_buf, &z, sizeof(bool));

    std::vector<std::pair<char*, size_t>> input_buffers = {
        {tensor_buf.data(), tensor_buf.size()},
        {oversized_int, sizeof(oversized_int)},
        {bool_buf, sizeof(bool_buf)},
    };

    Result<BufferCleanup> result =
        prepare_input_tensors(*intbool_method_, {}, input_buffers);
    EXPECT_EQ(result.error(), Error::InvalidArgument);
  }

  // Case 2: an oversized buffer for the Bool input must be rejected.
  {
    char int_buf[sizeof(int64_t)];
    memcpy(int_buf, &y, sizeof(int64_t));

    char oversized_bool[8];  // sizeof(bool) is 1.
    memcpy(oversized_bool, &z, sizeof(bool));

    std::vector<std::pair<char*, size_t>> input_buffers = {
        {tensor_buf.data(), tensor_buf.size()},
        {int_buf, sizeof(int_buf)},
        {oversized_bool, sizeof(oversized_bool)},
    };

    Result<BufferCleanup> result =
        prepare_input_tensors(*intbool_method_, {}, input_buffers);
    EXPECT_EQ(result.error(), Error::InvalidArgument);
  }
}
1 change: 1 addition & 0 deletions extension/runner_util/test/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -28,5 +28,6 @@ def define_common_targets(is_fbcode = False):
],
env = {
"ET_MODULE_ADD_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleAdd.pte])",
"ET_MODULE_INTBOOL_PATH": "$(location fbcode//executorch/test/models:exported_programs[ModuleIntBool.pte])",
},
)
12 changes: 12 additions & 0 deletions test/models/export_program.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,18 @@ def get_random_inputs(self):
return (torch.randn(10, 10, 10),)


# Used for testing int and bool inputs.
class ModuleIntBool(torch.nn.Module):
    """Trivial module whose forward() mixes a tensor with int and bool scalars."""

    def __init__(self):
        super().__init__()

    def forward(self, x: torch.Tensor, y: int, z: bool):
        # Fold the bool into a numeric offset (True -> 1, False -> 0).
        offset = 1 if z else 0
        return x + y + offset

    def get_random_inputs(self):
        # Deterministic sample inputs matching the forward() signature.
        return (torch.ones(1), 1, True)


class ModuleNoOp(nn.Module):
def __init__(self):
super(ModuleNoOp, self).__init__()
Expand Down
1 change: 1 addition & 0 deletions test/models/targets.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ def define_common_targets():
"ModuleMultipleEntry",
"ModuleNoKVCache",
"ModuleIndex",
"ModuleIntBool",
"ModuleDynamicCatUnallocatedIO",
"ModuleSimpleTrain",
"ModuleStateful",
Expand Down
Loading