Skip to content
Open
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 9 additions & 24 deletions xllm/core/runtime/forward_shared_memory_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -344,9 +344,10 @@ inline size_t get_dit_forward_input_size(const DiTForwardInput& input) {
return size;
}

inline size_t get_dit_forward_output_size(const DiTForwardOutput& output) {
size_t size = type_size<uint64_t>; // vector size
for (const auto& tensor : output.tensors) {
inline size_t get_vector_tensor_size(
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

According to the repository style guide (Rule 177), file-local functions used only within a single .cpp file must be placed in an anonymous namespace to ensure internal linkage and avoid potential symbol collisions. Since get_vector_tensor_size is not declared in the header file, it should be moved into an anonymous namespace.

References
  1. File-local functions and variables (used only within a single .cpp file) must be placed in an anonymous namespace. (link)

const std::vector<torch::Tensor>& tensor_vec) {
size_t size = type_size<int32_t>; // vector size
for (const auto& tensor : tensor_vec) {
size += get_tensor_size(tensor);
}
return size;
Expand Down Expand Up @@ -1017,14 +1018,6 @@ inline void write_dit_forward_input(RawInputSerializeContext& context,
write_dit_generation_params(context, input.generation_params);
}

// Serializes a DiTForwardOutput into `buffer`, advancing the pointer:
// first the tensor count as a uint64_t, then each tensor payload in order.
inline void write_dit_forward_output(char*& buffer,
                                     const DiTForwardOutput& output) {
  const auto count = static_cast<uint64_t>(output.tensors.size());
  write_data(buffer, count);
  for (const auto& t : output.tensors) {
    write_tensor(buffer, t);
  }
}

inline void safe_advance_buffer(const char*& buffer, size_t offset) {
if (buffer != nullptr) {
buffer += offset;
Expand Down Expand Up @@ -1876,16 +1869,6 @@ inline void read_dit_forward_input(ReadContext& context,
read_dit_generation_params(context, input.generation_params);
}

// Deserializes a DiTForwardOutput from `buffer`, advancing the pointer.
// Layout mirrors the writer: a uint64_t tensor count followed by the tensors.
inline void read_dit_forward_output(const char*& buffer,
                                    DiTForwardOutput& output) {
  uint64_t count = 0;
  read_data(buffer, count);
  output.tensors.resize(count);
  for (uint64_t i = 0; i < count; ++i) {
    read_tensor(buffer, output.tensors[i]);
  }
}

inline void initialize_device_buffer_session(ReadContext& context,
ForwardInput& forward_input,
const torch::Device& device,
Expand Down Expand Up @@ -2260,8 +2243,10 @@ size_t calculate_raw_forward_output_size(const RawForwardOutput& output) {
size += get_vector_size(output.out_tokens);
size += get_vector_size(output.out_logprobs);
size += type_size<int32_t>; // prepared_layer_id
// mm_embedding_data
size += get_vector_tensor_size(output.mm_embeddings);
// dit output data
size += get_dit_forward_output_size(output.dit_forward_output);
size += get_vector_tensor_size(output.dit_forward_output.tensors);

return size;
}
Expand Down Expand Up @@ -2328,7 +2313,7 @@ void deserialize_raw_forward_output(const char* buffer,

read_vector_tensor(buffer, output.mm_embeddings);
// read dit output
read_dit_forward_output(buffer, output.dit_forward_output);
read_vector_tensor(buffer, output.dit_forward_output.tensors);
}

void serialize_raw_forward_output(const RawForwardOutput& output,
Expand All @@ -2344,7 +2329,7 @@ void serialize_raw_forward_output(const RawForwardOutput& output,

write_vector_tensor(buffer, output.mm_embeddings);
// write dit output
write_dit_forward_output(buffer, output.dit_forward_output);
write_vector_tensor(buffer, output.dit_forward_output.tensors);
}

void convert_raw_forward_input_to_forward_input(RawForwardInput& raw_input,
Expand Down
Loading