Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 14 additions & 2 deletions backends/gpu/metal/metal_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,15 @@ MetalOps::MetalOps() : commandBuffer(nullptr), cur_int_args(0), cur_float_args(0
}

bufferIntArgs = device->newBuffer(TOTAL_INT_ARGS * sizeof(int), MTL::ResourceStorageModeShared);
if (!bufferIntArgs) {
std::cerr << "Failed to create buffer for int args!" << std::endl;
throw std::runtime_error("Failed to create buffer for int args");
}
bufferFloatArgs = device->newBuffer(TOTAL_FLOAT_ARGS * sizeof(float), MTL::ResourceStorageModeShared);
if (!bufferFloatArgs) {
std::cerr << "Failed to create buffer for float args!" << std::endl;
throw std::runtime_error("Failed to create buffer for float args");
}
load_kernel_metal();

addOps = new MetalKops("tensor_add_kernel", library);
Expand Down Expand Up @@ -115,10 +123,10 @@ void MetalOps::prepare() {
cur_float_args = 0;
}

// NOTE(review): the lines below are a unified-diff fragment — the removed
// (`int` return / `auto` local) and added (`unsigned int`) variants of the
// same statements both appear here. Only the `unsigned int` lines are the
// merged code.
int calc_offset(const Tensor* t) {
// Byte offset of this tensor's data from the start of its backing
// MTL::Buffer, computed by pointer arithmetic against the shared-storage
// contents() pointer.
unsigned int calc_offset(const Tensor* t) {
char* base = reinterpret_cast<char*>(reinterpret_cast<MTL::Buffer*>(t->get_storage()->ctx)->contents());
char* pos = reinterpret_cast<char*>(t->get_data());
auto offset_res = pos - base;
// NOTE(review): `pos - base` is ptrdiff_t; narrowing it to unsigned int
// silently truncates for offsets >= 4 GiB — confirm buffers stay below that.
unsigned int offset_res = pos - base;
return offset_res;
}

Expand Down Expand Up @@ -1428,6 +1436,10 @@ void MetalOps::mulSV(Tensor* dst, Tensor* src, float value) {

// Allocate a `size`-byte Metal buffer in shared (CPU+GPU visible) storage.
// On success, stores the raw MTL::Buffer* into *ctx — presumably so the
// caller can later reference/release the buffer; verify against the free
// path (not shown here) — and returns the host-visible contents() pointer.
// Allocation failure is treated as fatal: log to stderr and abort().
void* MetalOps::alloc(size_t size, void** ctx) {
MTL::Buffer* buffer = device->newBuffer(size, MTL::ResourceStorageModeShared);
if (!buffer) {
std::cerr << "Error allocating buffer of size " << size << std::endl;
abort();
}
// Hand the buffer object back through the opaque ctx slot.
*ctx = (void*)buffer;
// contents() yields the CPU-addressable pointer into the shared allocation.
return buffer->contents();
}
Expand Down
15 changes: 10 additions & 5 deletions lm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,8 @@ std::vector<uint> trim_or_padding(const std::vector<uint>& src, uint max_len, ui
std::vector<uint> res = src;
if (src.size() > max_len) {
res.resize(max_len);
} else {
}
else {
res.resize(max_len, pad_id);
}
return res;
Expand Down Expand Up @@ -98,7 +99,7 @@ int main(int argc, char* argv[]) {

int opt;
int epochs = 10;
int batch_size = 4;
int batch_size = 16;
int gpu = 1;
int max_words_cnt = 256;
float lr = 0.001f;
Expand Down Expand Up @@ -225,6 +226,7 @@ int main(int argc, char* argv[]) {
adam.clip_grad(1.0f);
adam.step();
graph::validateAllNodesRefCnt(0);
// printAllTensors();
// printAllActions();
allocMemAndInitTensors();
std::cout << "Allocating memory " << std::endl
Expand Down Expand Up @@ -264,7 +266,8 @@ int main(int argc, char* argv[]) {
auto origin_size = src_token_ids.size();
if (src_token_ids.size() < num_steps) {
src_token_ids.resize(num_steps, loader.get_pad_id());
} else if (src_token_ids.size() > num_steps) {
}
else if (src_token_ids.size() > num_steps) {
src_token_ids.erase(src_token_ids.begin(), src_token_ids.end() - num_steps);
}
auto cur_step = origin_size - 1;
Expand Down Expand Up @@ -302,15 +305,17 @@ int main(int argc, char* argv[]) {
if (cur_step >= num_steps - 1) {
src_token_ids.push_back(max_index);
src_token_ids.erase(src_token_ids.begin(), src_token_ids.end() - num_steps);
} else {
}
else {
src_token_ids[++cur_step] = max_index;
}
}
std::cout << std::endl;
std::cout << "-----------------" << std::endl;
::free(res_buffer);
}
} else {
}
else {
init_dec_valid_lens_for_training(dec_valid_lens);
signal(SIGINT, signal_callback_handler);
int epoch = 0;
Expand Down