Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions src/v/config/configuration.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4835,6 +4835,12 @@ configuration::configuration()
{.needs_restart = needs_restart::yes, .visibility = visibility::tunable},
32,
{.min = 1})
, code_hugepages_enabled(
*this,
"code_hugepages_enabled",
"Map the binary into hugepages",
{.needs_restart = needs_restart::no, .visibility = visibility::tunable},
false)
, development_feature_property_testing_only(
*this,
"development_feature_property_testing_only",
Expand Down
2 changes: 2 additions & 0 deletions src/v/config/configuration.h
Original file line number Diff line number Diff line change
Expand Up @@ -837,6 +837,8 @@ struct configuration final : public config_store {
bounded_property<size_t> cloud_topics_produce_write_inflight_limit;
bounded_property<size_t> cloud_topics_produce_no_pid_concurrency;

property<bool> code_hugepages_enabled;

development_feature_property<int> development_feature_property_testing_only;

private:
Expand Down
22 changes: 14 additions & 8 deletions src/v/redpanda/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -144,14 +144,20 @@ redpanda_cc_binary(
srcs = [
"main.cc",
],
linkopts =
select({
":use_emit_relocs": [
"-Wl,--emit-relocs",
],
"//conditions:default": [
],
}),
linkopts = [
# Align loadable segments to 2 MB so the text segment is eligible for
# transparent huge pages (PMD-mapped). separate-loadable-segments
# ensures each PT_LOAD gets its own mmap at a 2 MB boundary rather
# than packing segments into a single mapping.
"-Wl,-z,max-page-size=2097152",
"-Wl,-z,separate-loadable-segments",
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

how many PT_LOAD segments are there? i.e., are we wasting a lot of "space" if we don't fill them out to the next 2MB boundary

Copy link
Copy Markdown
Member Author

@StephanDollberg StephanDollberg Apr 16, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There are 4 in the redpanda binary. We're adding ~2MB (just coincidentally similar to the 2MiB alignment) of total padding. This is about a 2% binary size increase.

From when I looked into this earlier.

PT_LOAD layout

Aligned:
LOAD  off 0x0000000  filesz 0x1fff980  R
LOAD  off 0x2000000  filesz 0x4f8b920  R E
LOAD  off 0x7000000  filesz 0x02630a0  RW
LOAD  off 0x7400000  filesz 0x0011698  RW

Baseline:
LOAD  off 0x0000000  filesz 0x1fff980  R
LOAD  off 0x1fff980  filesz 0x4f8b920  R E
LOAD  off 0x6f8b2c0  filesz 0x02630a0  RW
LOAD  off 0x71ee360  filesz 0x0011698  RW

Gap between segments
Aligned:
- R -> RX: 1,664 bytes
- RX -> RW: 476,896 bytes
- RW -> RW: 1,691,488 bytes
- total: 2,170,048 bytes

Baseline:
- R -> RX: 0
- RX -> RW: 32
- RW -> RW: 0
- total: 32 bytes

] + select({
Comment thread
StephanDollberg marked this conversation as resolved.
":use_emit_relocs": [
"-Wl,--emit-relocs",
],
"//conditions:default": [
],
}),
visibility = ["//visibility:public"],
deps = [
":application",
Expand Down
13 changes: 13 additions & 0 deletions src/v/redpanda/application.cc
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
#include "security/audit/audit_log_manager.h"
#include "storage/api.h"
#include "storage/directories.h"
#include "syschecks/hugepages.h"
#include "syschecks/syschecks.h"
#include "utils/file_io.h"
#include "utils/human.h"
Expand Down Expand Up @@ -792,6 +793,18 @@ void application::check_environment() {
syschecks::systemd_message("checking environment (CPU, Mem)").get();
syschecks::cpu();
syschecks::memory(config::node().developer_mode());
if (config::shard_local_cfg().code_hugepages_enabled()) {
syschecks::promote_code_to_hugepages();
}
_code_hugepages_binding.emplace(
config::shard_local_cfg().code_hugepages_enabled.bind());
_code_hugepages_binding->watch([this] {
if ((*_code_hugepages_binding)()) {
syschecks::promote_code_to_hugepages();
} else {
syschecks::demote_code_from_hugepages();
}
Comment thread
StephanDollberg marked this conversation as resolved.
});
memory_groups().log_memory_group_allocations(_log);
storage::directories::initialize(
config::node().data_directory().as_sstring())
Expand Down
1 change: 1 addition & 0 deletions src/v/redpanda/application.h
Original file line number Diff line number Diff line change
Expand Up @@ -292,6 +292,7 @@ class application : public ssx::sharded_service_container {
ss::sharded<scheduling_groups_probe> _scheduling_groups_probe;

std::optional<config::binding<bool>> _abort_on_oom;
std::optional<config::binding<bool>> _code_hugepages_binding;

ss::sharded<memory_sampling> _memory_sampling;
ss::sharded<rpc::rpc_server> _rpc;
Expand Down
2 changes: 2 additions & 0 deletions src/v/syschecks/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,12 @@ load("//bazel:build.bzl", "redpanda_cc_library")
redpanda_cc_library(
name = "syschecks",
srcs = [
"hugepages.cc",
"pidfile.cc",
"syschecks.cc",
],
Comment thread
StephanDollberg marked this conversation as resolved.
hdrs = [
"hugepages.h",
"syschecks.h",
],
visibility = ["//visibility:public"],
Expand Down
137 changes: 137 additions & 0 deletions src/v/syschecks/hugepages.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,137 @@
// Copyright 2026 Redpanda Data, Inc.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.md
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0

#include "syschecks/hugepages.h"

#include "base/vlog.h"
#include "syschecks/syschecks.h"

#include <sys/mman.h>

#include <cstddef>
#include <link.h>

// MADV_COLLAPSE was added in Linux 6.1. Define it for older headers.
#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25
#endif
Comment thread
StephanDollberg marked this conversation as resolved.

namespace syschecks {

namespace {

/// Invoke fn(addr, len) for each non-writable PT_LOAD segment across
/// all loaded ELF objects (main binary + shared libraries). This covers
/// .text (PF_R|PF_X) and .rodata (PF_R) segments.
/// Call fn(addr, len) once for every non-writable PT_LOAD segment of each
/// ELF object currently loaded (the main binary plus all shared libraries).
/// Non-writable segments cover .text (PF_R|PF_X) as well as .rodata (PF_R).
template<typename Fn>
void for_each_ro_segment(Fn fn) {
    // dl_iterate_phdr only takes a plain function pointer, so the callback
    // is a capture-less lambda and the functor travels through the opaque
    // `data` argument.
    auto visit = [](struct dl_phdr_info* info, size_t /*size*/, void* data) -> int {
        auto* cb = static_cast<Fn*>(data);
        for (unsigned idx = 0; idx < info->dlpi_phnum; ++idx) {
            const auto& hdr = info->dlpi_phdr[idx];
            const bool loadable = hdr.p_type == PT_LOAD;
            const bool writable = (hdr.p_flags & PF_W) != 0;
            // Ignore non-load segments, writable data (.data, .bss) and
            // empty segments.
            if (!loadable || writable || hdr.p_memsz == 0) {
                continue;
            }
            auto start = info->dlpi_addr + hdr.p_vaddr;
            (*cb)(
              reinterpret_cast<void*>(start), static_cast<size_t>(hdr.p_memsz));
        }
        return 0; // keep iterating over the remaining objects
    };
    dl_iterate_phdr(visit, &fn);
}

} // namespace

void promote_code_to_hugepages() {
size_t total_bytes = 0;
size_t marked_bytes = 0;
size_t collapsed_bytes = 0;

for_each_ro_segment([&](void* addr, size_t len) {
total_bytes += len;

// Mark the VMA for huge pages. In "madvise" THP mode (the common
// default), khugepaged only scans VMAs with VM_HUGEPAGE set, so this
// is required for ongoing huge page maintenance — not just a hint.
if (::madvise(addr, len, MADV_HUGEPAGE) == 0) {
marked_bytes += len;
}

// Fault in all pages so MADV_COLLAPSE has something to work with.
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we want to sync fault in all huge pages? Should we still want on-demand mapping here? It's a lot of memory to waste.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we want to sync fault in all huge pages? Should we still want on-demand mapping here?

For performance reasons? Best to take the page faults now? So from that POV I am on the no side.

(Note if we don't want this that then also rules out MADV_COLLAPSE altogether as per the comment).

It's a lot of memory to waste.

It's like 120MiB or so? I mean sure you are probably never going to need every single code page but I don't see much point in saving a few XX MB?

Copy link
Copy Markdown
Member

@travisdowns travisdowns Apr 16, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is that how big the executable segments covered are? Yeah I guess it's not too much. Do you happen to have any timings?

Best to take the page faults now?

Maybe yes, but this is more obvious if you are going to take them all eventually anyway (like the heap, and currently we don't even enable this lock-meomry option for the heap). If you would only take 5% of them over the lifetime of the process then this looks less appealing. I have no idea if it's 5% or 95% though (evidently depends at least a bit on workload).

Anyway I think it's fine.

One other thing though: why are we doing the MADV_HUGEPAGE and MADV_COLLAPSE? ISTM you only want one or the other: the former for lazy, the latter for sync full mapping. I.e., I feel like MADV_HUGEPAGE does nothing now, unless it's for kernels that support MADV_HUGEPAGE and not MADV_COLLAPSE?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is that how big the executable segments covered are?

Yeah

Do you happen to have any timings?

Timings of what sorry?

One other thing though: why are we doing the MADV_HUGEPAGE and MADV_COLLAPSE? ISTM you only want one or the other: the former for lazy, the latter for sync full mapping. I.e., I feel like MADV_HUGEPAGE does nothing now, unless it's for kernels that support MADV_HUGEPAGE and not MADV_COLLAPSE?

Yeah exactly, MADV_HUGEPAGE should only affect the latter. I don't think it hurts?

We could do the whole if (not collapse fails) else { madv_hugepage } dance but I am not entirely sure about all the MADV_COLLAPSE return value semantics (.e.g.: as per docs one "area" in the range might fail to map which will already make it not return clean so we would do both in that case anyway).

No strong feelings though.

// At startup most pages are still demand-paged.
// In theory this is not needed with MADV_COLLAPSE but the docs leave a
// cop out so we are just explicit in any case.
// Incompatible with ASAN, disable if on
#if !__has_feature(address_sanitizer)
auto* base = static_cast<volatile const char*>(addr);
for (size_t off = 0; off < len; off += 4096) {
[[maybe_unused]] char c = base[off];
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

FWIW just base[off] works also

}
#endif

// Synchronously collapse 4 KB pages into 2 MB huge pages
// (Linux 6.1+). Without this, khugepaged promotes pages in the
// background over the next few seconds; MADV_COLLAPSE makes it
// immediate (best effort).
if (::madvise(addr, len, MADV_COLLAPSE) == 0) {
collapsed_bytes += len;
}
Comment thread
StephanDollberg marked this conversation as resolved.
});

if (total_bytes > 0) {
vlog(
checklog.info,
"hugepages: {}/{} MiB marked, {}/{} MiB collapsed",
marked_bytes / (1024 * 1024),
total_bytes / (1024 * 1024),
collapsed_bytes / (1024 * 1024),
total_bytes / (1024 * 1024));
Comment thread
StephanDollberg marked this conversation as resolved.
}
}

/// Reverse promote_code_to_hugepages(): stop khugepaged from touching the
/// read-only segments again and drop their current page table entries so
/// they are re-faulted as regular 4 KiB pages.
void demote_code_from_hugepages() {
    constexpr size_t mib = 1024 * 1024;
    size_t seen_bytes = 0;
    size_t dropped_bytes = 0;

    for_each_ro_segment([&](void* start, size_t size) {
        seen_bytes += size;

        // First forbid re-promotion: khugepaged skips VMAs carrying
        // VM_NOHUGEPAGE.
        const bool marked = ::madvise(start, size, MADV_NOHUGEPAGE) == 0;
        if (!marked) {
            return;
        }

        // MADV_NOHUGEPAGE only prevents future promotions — existing PMD
        // entries for file-backed pages are not split. MADV_DONTNEED drops
        // the page table entries; subsequent faults repopulate them at 4 KB
        // granularity (since MADV_NOHUGEPAGE is set).
        if (::madvise(start, size, MADV_DONTNEED) == 0) {
            dropped_bytes += size;
        }
    });

    if (seen_bytes > 0) {
        vlog(
          checklog.info,
          "hugepages: demoted {}/{} MiB from huge pages",
          dropped_bytes / mib,
          seen_bytes / mib);
    }
}

} // namespace syschecks
24 changes: 24 additions & 0 deletions src/v/syschecks/hugepages.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/*
* Copyright 2026 Redpanda Data, Inc.
*
* Use of this software is governed by the Business Source License
* included in the file licenses/BSL.md
*
* As of the Change Date specified in that file, in accordance with
* the Business Source License, use of this software will be governed
* by the Apache License, Version 2.0
*/

#pragma once

namespace syschecks {

/// Promote file-backed executable mappings (code segments) to transparent huge
/// pages. Marks the binary's non-writable PT_LOAD segments with MADV_HUGEPAGE
/// and attempts a synchronous MADV_COLLAPSE (best effort).
void promote_code_to_hugepages();

/// Undo the effect of promote_code_to_hugepages(). Marks executable VMAs with
/// MADV_NOHUGEPAGE and drops their page table entries so they are re-faulted
/// at regular 4 KiB granularity.
void demote_code_from_hugepages();

} // namespace syschecks
Loading