From 2491fc315db5c8c405bf76c2410391dc64ba571a Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 29 Nov 2024 09:00:35 +0000 Subject: [PATCH 1/4] net_buffer_tuner: increase netdev_budget if we see time_squeeze events time_squeeze for a softnet poll means we ran out of time specified in netdev_budget_usecs; if we see increases in time_squeeze, increase netdev_budget Signed-off-by: Alan Maguire --- src/net_buffer_tuner.bpf.c | 70 +++++++++++++++++++++-------- src/net_buffer_tuner.c | 48 +++++++++++++++----- src/net_buffer_tuner.h | 2 + test/Makefile | 1 + test/budget_test.sh | 92 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 183 insertions(+), 30 deletions(-) create mode 100644 test/budget_test.sh diff --git a/src/net_buffer_tuner.bpf.c b/src/net_buffer_tuner.bpf.c index bef6f0e..eb8d490 100644 --- a/src/net_buffer_tuner.bpf.c +++ b/src/net_buffer_tuner.bpf.c @@ -30,11 +30,17 @@ __u64 drop_interval_start = 0; __u64 flow_limit_cpu_bitmap = 0; int netdev_max_backlog = 0; +int netdev_budget = 0; #ifdef BPFTUNE_LEGACY SEC("kretprobe/enqueue_to_backlog") int BPF_KRETPROBE(bpftune_enqueue_to_backlog, int ret) #else + +BPF_MAP_DEF(time_squeeze_map, BPF_MAP_TYPE_PERCPU_ARRAY, unsigned int, unsigned int, 1, 0); + +extern const struct softnet_data softnet_data __ksym; + SEC("fexit/enqueue_to_backlog") int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu, unsigned int *qtail, int ret) @@ -64,28 +70,56 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu, drop_count = 1; drop_interval_start = time; } - if (drop_count < (max_backlog >> 4)) - return 0; - - old[0] = max_backlog; - new[0] = BPFTUNE_GROW_BY_DELTA(max_backlog); - send_net_sysctl_event(NULL, NETDEV_MAX_BACKLOG_INCREASE, - NETDEV_MAX_BACKLOG, old, new, &event); + if (drop_count >= (max_backlog >> 4)) { + old[0] = max_backlog; + new[0] = BPFTUNE_GROW_BY_DELTA(max_backlog); + send_net_sysctl_event(NULL, NETDEV_MAX_BACKLOG_INCREASE, + NETDEV_MAX_BACKLOG, old, new, &event); #ifdef BPFTUNE_LEGACY - int cpu = bpf_get_smp_processor_id(); + int cpu = bpf_get_smp_processor_id(); #endif - /* ensure flow limits prioritize small flows on this cpu */ - if (cpu < 64) { - cpubit = 1 << cpu; - if (!(flow_limit_cpu_bitmap & cpubit)) { - old[0] = flow_limit_cpu_bitmap; - new[0] = flow_limit_cpu_bitmap |= cpubit; - if (!send_net_sysctl_event(NULL, FLOW_LIMIT_CPU_SET, - FLOW_LIMIT_CPU_BITMAP, - old, new, &event)) - flow_limit_cpu_bitmap = new[0]; + /* ensure flow limits prioritize small flows on this cpu */ + if (cpu < 64) { + cpubit = 1 << cpu; + if (!(flow_limit_cpu_bitmap & cpubit)) { + old[0] = flow_limit_cpu_bitmap; + new[0] = flow_limit_cpu_bitmap |= cpubit; + if (!send_net_sysctl_event(NULL, FLOW_LIMIT_CPU_SET, + FLOW_LIMIT_CPU_BITMAP, + old, new, &event)) + flow_limit_cpu_bitmap = new[0]; + } } } return 0; } + +#ifndef BPFTUNE_LEGACY +SEC("fexit/enqueue_to_backlog") +int BPF_PROG(net_rx_action) +{ + struct bpftune_event event = { 0 }; + long old[3], new[3]; + struct softnet_data *sd; + unsigned int time_squeeze, *last_time_squeeze; + unsigned int zero = 0; + + sd = (struct softnet_data *)bpf_this_cpu_ptr(&softnet_data); + + time_squeeze = BPFTUNE_CORE_READ(sd, time_squeeze); + if (!time_squeeze) + return 0; + last_time_squeeze = bpf_map_lookup_elem(&time_squeeze_map, &zero); + if (!last_time_squeeze) + return 0; + if (time_squeeze <= *last_time_squeeze) + return 0; + *last_time_squeeze = time_squeeze; + old[0] = netdev_budget; + new[0] = BPFTUNE_GROW_BY_DELTA(netdev_budget); + send_net_sysctl_event(NULL, 
NETDEV_BUDGET_INCREASE, + NETDEV_BUDGET, old, new, &event); + return 0; +} +#endif diff --git a/src/net_buffer_tuner.c b/src/net_buffer_tuner.c index 359af05..43a702c 100644 --- a/src/net_buffer_tuner.c +++ b/src/net_buffer_tuner.c @@ -17,23 +17,29 @@ static struct bpftunable_desc descs[] = { { FLOW_LIMIT_CPU_BITMAP, BPFTUNABLE_SYSCTL, "net.core.flow_limit_cpu_bitmap", 0, 1 }, +{ NETDEV_BUDGET, BPFTUNABLE_SYSCTL, "net.core.netdev_budget", + 0, 1 }, }; static struct bpftunable_scenario scenarios[] = { { NETDEV_MAX_BACKLOG_INCREASE, "need to increase max backlog size", "Need to increase backlog size to prevent drops for faster connection" }, { FLOW_LIMIT_CPU_SET, "need to set per-cpu bitmap value", - "Need to set flow limit per-cpu to prioritize small flows" } + "Need to set flow limit per-cpu to prioritize small flows" }, +{ NETDEV_BUDGET_INCREASE, "need to increase # of packets processed per NAPI poll", + "Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs" } }; int init(struct bpftuner *tuner) { long cpu_bitmap = 0; long max_backlog = 0; + long budget = 0; int err; bpftune_sysctl_read(0, "net.core.flow_limit_cpu_bitmap", &cpu_bitmap); bpftune_sysctl_read(0, "net.core.netdev_max_backlog", &max_backlog); + bpftune_sysctl_read(0, "net.core.netdev_budget", &budget); err = bpftuner_bpf_open(net_buffer, tuner); if (err) return err; @@ -44,6 +50,8 @@ int init(struct bpftuner *tuner) cpu_bitmap); bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog, max_backlog); + bpftuner_bpf_var_set(net_buffer, tuner, netdev_budget, + budget); err = bpftuner_bpf_attach(net_buffer, tuner); if (err) return err; @@ -64,7 +72,7 @@ void event_handler(struct bpftuner *tuner, { int scenario = event->scenario_id; const char *tunable; - int id; + int id, ret; /* netns cookie not supported; ignore */ if (event->netns_cookie == (unsigned long)-1) @@ -73,21 +81,23 @@ void event_handler(struct bpftuner *tuner, id = event->update[0].id; tunable = bpftuner_tunable_name(tuner, id); if (!tunable) { - bpftune_log(LOG_DEBUG, "unknown tunable [%d] for tcp_buffer_tuner\n", id); + bpftune_log(LOG_DEBUG, "unknown tunable [%d] for net_buffer_tuner\n", id); return; } switch (id) { case NETDEV_MAX_BACKLOG: - bpftuner_tunable_sysctl_write(tuner, id, scenario, - event->netns_cookie, 1, - (long int *)event->update[0].new, + ret = bpftuner_tunable_sysctl_write(tuner, id, scenario, + event->netns_cookie, 1, + (long int *)event->update[0].new, "Due to excessive drops, change %s from (%ld) -> (%ld)\n", - tunable, - event->update[0].old[0], + tunable, + event->update[0].old[0], + event->update[0].new[0]); + if (!ret) { + /* update value of netdev_max_backlog for BPF program */ + bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog, event->update[0].new[0]); - /* update value of netdev_max_backlog for BPF program */ - bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog, - event->update[0].new[0]); + } break; case FLOW_LIMIT_CPU_BITMAP: bpftuner_tunable_sysctl_write(tuner, id, scenario, @@ -97,6 +107,20 @@ void event_handler(struct bpftuner *tuner, tunable, event->update[0].old[0], event->update[0].new[0]); - + break; + case NETDEV_BUDGET: + ret = bpftuner_tunable_sysctl_write(tuner, id, scenario, + event->netns_cookie, 1, + (long int *)event->update[0].new, +"To maximize # packets processed per NAPI cycle, change %s from (%ld) -> (%ld)\n", + tunable, + event->update[0].old[0], + event->update[0].new[0]); + if (!ret) { + /* update value of 
netdev_budget for BPF program */ + bpftuner_bpf_var_set(net_buffer, tuner, netdev_budget, + event->update[0].new[0]); + } + break; } } diff --git a/src/net_buffer_tuner.h b/src/net_buffer_tuner.h index 3667ba1..1a89e27 100644 --- a/src/net_buffer_tuner.h +++ b/src/net_buffer_tuner.h @@ -22,10 +22,12 @@ enum net_buffer_tunables { NETDEV_MAX_BACKLOG, FLOW_LIMIT_CPU_BITMAP, + NETDEV_BUDGET, NET_BUFFER_NUM_TUNABLES, }; enum net_buffer_scenarios { NETDEV_MAX_BACKLOG_INCREASE, FLOW_LIMIT_CPU_SET, + NETDEV_BUDGET_INCREASE, }; diff --git a/test/Makefile b/test/Makefile index 49f70b6..dc56d32 100644 --- a/test/Makefile +++ b/test/Makefile @@ -28,6 +28,7 @@ TUNER_TESTS = support_test log_test service_test inotify_test cap_test \ podman_globalonly_test podman_globalonly_legacy_test \ sysctl_test sysctl_legacy_test sysctl_netns_test \ netns_test netns_legacy_test \ + budget_test \ backlog_test backlog_legacy_test \ frag_test frag_legacy_test \ neigh_table_test neigh_table_v4only_test \ diff --git a/test/budget_test.sh b/test/budget_test.sh new file mode 100644 index 0000000..743e01d --- /dev/null +++ b/test/budget_test.sh @@ -0,0 +1,92 @@ +#!/usr/bin/bash +# +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# Copyright (c) 2023, Oracle and/or its affiliates. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License v2 as published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this program; if not, write to the +# Free Software Foundation, Inc., 59 Temple Place - Suite 330, +# Boston, MA 021110-1307, USA. +# + +# run iperf3 test with low netdev_budget, ensure tuner increases it. + +PORT=5201 + +. ./test_lib.sh + +SLEEPTIME=1 +TIMEOUT=30 +MAX_CONN=50 + +for FAMILY in ipv4 ipv6 ; do + + for CLIENT_OPTS in "" ; do + case $FAMILY in + ipv4) + ADDR=$VETH2_IPV4 + ;; + ipv6) + ADDR=$VETH2_IPV6 + ;; + esac + + test_start "$0|budget test to $ADDR:$PORT $FAMILY opts $CLIENT_OPTS $LATENCY" + + budget_orig=($(sysctl -n net.core.netdev_max_backlog)) + test_setup true + + sysctl -w net.core.netdev_budget=5 + budget_pre=($(sysctl -n net.core.netdev_budget)) + declare -A results + for MODE in baseline test ; do + + echo "Running ${MODE}..." 
+ test_run_cmd_local "$IPERF3 -s -p $PORT &" + if [[ $MODE != "baseline" ]]; then + test_run_cmd_local "$BPFTUNE -s &" true + sleep $SETUPTIME + else + LOGSZ=$(wc -l $LOGFILE | awk '{print $1}') + LOGSZ=$(expr $LOGSZ + 1) + fi + test_run_cmd_local "ip netns exec $NETNS $IPERF3 -fm -t 20 $CLIENT_OPTS -c $PORT -c $ADDR" true + sleep $SLEEPTIME + + sresults=$(grep -E "sender" ${CMDLOG} | awk '{print $7}') + rresults=$(grep -E "receiver" ${CMDLOG} | awk '{print $7}') + units=$(grep -E "sender|receiver" ${CMDLOG} | awk '{print $8}' |head -1) + + if [[ $MODE == "baseline" ]]; then + read -r -a sbaseline_results <<< $sresults + read -r -a rbaseline_results <<< $rresults + echo "" > ${CMDLOG} + else + read -r -a stest_results <<< $sresults + read -r -a rtest_results <<< $rresults + + fi + sleep $SLEEPTIME + done + + budget_post=($(sysctl -n net.core.netdev_budget)) + sysctl -w net.core.netdev_budget="$budget_orig" + echo "budget ${budget_pre} -> ${budget_post}" + if [[ "$budget_post" -gt "$budget_pre" ]]; then + test_pass + fi + test_cleanup + done +done + +test_exit From 91acd5ea6aba33ca69ca8bc512361539b0a45f7e Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Fri, 29 Nov 2024 09:07:52 +0000 Subject: [PATCH 2/4] net_buffer_tuner: document netdev_budget handling Signed-off-by: Alan Maguire --- docs/bpftune-net-buffer.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/bpftune-net-buffer.rst b/docs/bpftune-net-buffer.rst index 08cc48d..7b4fe09 100644 --- a/docs/bpftune-net-buffer.rst +++ b/docs/bpftune-net-buffer.rst @@ -22,9 +22,18 @@ DESCRIPTION appropriate bit is set in the CPU bitmask to prioritize small flows for drop avoidance. + When NAPI polls to handle multiple packets, the number of packets + is limited by net.core.netdev_budget while the time is limited + by net.core.netdev_budget_usecs. If we hit the limit of number + of packets processed without using the usecs budget the time_squeezed + softnet stat is bumped; if we see increases in time_squeezed, bump + netdev_budget to use all budget usecs. + Tunables: - net.core.netdev_max_backlog: maximum per-cpu backlog queue length; default 1024. - net.core.flow_limit_cpu_bitmap: avoid drops for small flows on a per-cpu basis; default 0. + - net.core.netdev_budget: maximum number of packets processed in + a NAPI cycle From ac77c6ddd70df5e5b94b5265341dc006169df68b Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 2 Dec 2024 15:45:11 +0000 Subject: [PATCH 3/4] net_buffer_tuner: update netdev_budget_usecs also both netdev_budget and netdev_budget_usecs limit NAPI poll cycles, and if either is exhausted the time_squeeze value is updated. So to increase NAPI cycle time, both must be updated together. Signed-off-by: Alan Maguire --- docs/bpftune-net-buffer.rst | 4 +++- include/bpftune/bpftune.bpf.h | 4 ++++ src/net_buffer_tuner.bpf.c | 43 +++++++++++++++++++++++------------ src/net_buffer_tuner.c | 31 +++++++++++++++++++++++++ src/net_buffer_tuner.h | 1 + 5 files changed, 67 insertions(+), 16 deletions(-) diff --git a/docs/bpftune-net-buffer.rst b/docs/bpftune-net-buffer.rst index 7b4fe09..59b6935 100644 --- a/docs/bpftune-net-buffer.rst +++ b/docs/bpftune-net-buffer.rst @@ -27,7 +27,7 @@ DESCRIPTION by net.core.netdev_budget_usecs. If we hit the limit of number of packets processed without using the usecs budget the time_squeezed softnet stat is bumped; if we see increases in time_squeezed, bump - netdev_budget to use all budget usecs. + netdev_budget/netdev_budget_usecs. 
Tunables: @@ -37,3 +37,5 @@ DESCRIPTION a per-cpu basis; default 0. - net.core.netdev_budget: maximum number of packets processed in a NAPI cycle + - net.core.netdev_budget_usecs: maximum amount of time in microseconds + for a NAPI cycle diff --git a/include/bpftune/bpftune.bpf.h b/include/bpftune/bpftune.bpf.h index 5f881b9..2d3a1b7 100644 --- a/include/bpftune/bpftune.bpf.h +++ b/include/bpftune/bpftune.bpf.h @@ -273,6 +273,10 @@ unsigned long bpftune_init_net; bool debug; +unsigned int bpftune_sample_rate = 4; + +#define bpftune_skip_sample(count) ((++count % bpftune_sample_rate) != 0) + #define __barrier asm volatile("" ::: "memory") #define bpftune_log(...) __bpf_printk(__VA_ARGS__) diff --git a/src/net_buffer_tuner.bpf.c b/src/net_buffer_tuner.bpf.c index eb8d490..67ba754 100644 --- a/src/net_buffer_tuner.bpf.c +++ b/src/net_buffer_tuner.bpf.c @@ -31,16 +31,12 @@ __u64 flow_limit_cpu_bitmap = 0; int netdev_max_backlog = 0; int netdev_budget = 0; +int netdev_budget_usecs = 0; #ifdef BPFTUNE_LEGACY SEC("kretprobe/enqueue_to_backlog") int BPF_KRETPROBE(bpftune_enqueue_to_backlog, int ret) #else - -BPF_MAP_DEF(time_squeeze_map, BPF_MAP_TYPE_PERCPU_ARRAY, unsigned int, unsigned int, 1, 0); - -extern const struct softnet_data softnet_data __ksym; - SEC("fexit/enqueue_to_backlog") int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu, unsigned int *qtail, int ret) @@ -58,7 +54,7 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu, drop_count++; /* only sample subset of drops to reduce overhead. */ - if ((drop_count % 4) != 0) + if (bpftune_skip_sample(drop_count)) return 0; /* if we drop more than 1/16 of the backlog queue size/min, @@ -96,28 +92,45 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu, } #ifndef BPFTUNE_LEGACY -SEC("fexit/enqueue_to_backlog") + +BPF_MAP_DEF(time_squeeze_map, BPF_MAP_TYPE_PERCPU_ARRAY, unsigned int, unsigned int, 1, 0); + +extern const struct softnet_data softnet_data __ksym; + +__u64 rx_count = 0; + +SEC("fexit/net_rx_action") int BPF_PROG(net_rx_action) { struct bpftune_event event = { 0 }; long old[3], new[3]; struct softnet_data *sd; - unsigned int time_squeeze, *last_time_squeeze; + unsigned int time_squeeze, *last_time_squeezep, last_time_squeeze; unsigned int zero = 0; + if (bpftune_skip_sample(rx_count)) + return 0; sd = (struct softnet_data *)bpf_this_cpu_ptr(&softnet_data); - + if (!sd) + return 0; time_squeeze = BPFTUNE_CORE_READ(sd, time_squeeze); if (!time_squeeze) return 0; - last_time_squeeze = bpf_map_lookup_elem(&time_squeeze_map, &zero); - if (!last_time_squeeze) + last_time_squeezep = bpf_map_lookup_elem(&time_squeeze_map, &zero); + if (!last_time_squeezep) + return 0; + last_time_squeeze = *last_time_squeezep; + /* if time squeeze increased for every instance of + * net_rx_action() since last sample, we increase. + */ + if (time_squeeze <= (last_time_squeeze + bpftune_sample_rate)) return 0; - if (time_squeeze <= *last_time_squeeze) + *last_time_squeezep = time_squeeze; + /* did not have previous time_squeeze value for comparison, bail. 
*/ + if (!(last_time_squeeze)) return 0; - *last_time_squeeze = time_squeeze; - old[0] = netdev_budget; - new[0] = BPFTUNE_GROW_BY_DELTA(netdev_budget); + old[0] = (long)netdev_budget; + new[0] = BPFTUNE_GROW_BY_DELTA((long)netdev_budget); send_net_sysctl_event(NULL, NETDEV_BUDGET_INCREASE, NETDEV_BUDGET, old, new, &event); return 0; diff --git a/src/net_buffer_tuner.c b/src/net_buffer_tuner.c index 43a702c..7ae3337 100644 --- a/src/net_buffer_tuner.c +++ b/src/net_buffer_tuner.c @@ -7,6 +7,7 @@ #include "net_buffer_tuner.skel.legacy.h" #include "net_buffer_tuner.skel.nobtf.h" +#include #include struct tcp_buffer_tuner_bpf *skel; @@ -19,6 +20,8 @@ static struct bpftunable_desc descs[] = { 0, 1 }, { NETDEV_BUDGET, BPFTUNABLE_SYSCTL, "net.core.netdev_budget", 0, 1 }, +{ NETDEV_BUDGET_USECS, BPFTUNABLE_SYSCTL, "net.core.netdev_budget_usecs", + 0, 1 }, }; static struct bpftunable_scenario scenarios[] = { @@ -35,11 +38,13 @@ int init(struct bpftuner *tuner) long cpu_bitmap = 0; long max_backlog = 0; long budget = 0; + long budget_usecs = 0; int err; bpftune_sysctl_read(0, "net.core.flow_limit_cpu_bitmap", &cpu_bitmap); bpftune_sysctl_read(0, "net.core.netdev_max_backlog", &max_backlog); bpftune_sysctl_read(0, "net.core.netdev_budget", &budget); + bpftune_sysctl_read(0, "net.core.netdev_budget_usecs", &budget_usecs); err = bpftuner_bpf_open(net_buffer, tuner); if (err) return err; @@ -52,6 +57,8 @@ int init(struct bpftuner *tuner) max_backlog); bpftuner_bpf_var_set(net_buffer, tuner, netdev_budget, budget); + bpftuner_bpf_var_set(net_buffer, tuner, netdev_budget_usecs, + budget_usecs); err = bpftuner_bpf_attach(net_buffer, tuner); if (err) return err; @@ -109,6 +116,8 @@ void event_handler(struct bpftuner *tuner, event->update[0].new[0]); break; case NETDEV_BUDGET: + if (event->update[0].new[0] > INT_MAX) + break; ret = bpftuner_tunable_sysctl_write(tuner, id, scenario, event->netns_cookie, 1, (long int *)event->update[0].new, @@ -117,9 +126,31 @@ void event_handler(struct bpftuner *tuner, event->update[0].old[0], event->update[0].new[0]); if (!ret) { + long budget_usecs, budget_usecs_new; + /* update value of netdev_budget for BPF program */ bpftuner_bpf_var_set(net_buffer, tuner, netdev_budget, event->update[0].new[0]); + /* need to also update budget_usecs since both + * limit netdev budget and reaching either limit + * triggers time_squeeze. 
+ */ + budget_usecs = bpftuner_bpf_var_get(net_buffer, tuner, + netdev_budget_usecs); + budget_usecs_new = BPFTUNE_GROW_BY_DELTA(budget_usecs); + ret = bpftuner_tunable_sysctl_write(tuner, + NETDEV_BUDGET_USECS, + scenario, + event->netns_cookie, + 1, + &budget_usecs_new, +"To maximize # packets processed per NAPI cycle, change netdev_budget_usecs from (%ld) -> (%ld)\n", + budget_usecs, + budget_usecs_new); + if (!ret) + bpftuner_bpf_var_set(net_buffer, tuner, + netdev_budget_usecs, + budget_usecs_new); } break; } diff --git a/src/net_buffer_tuner.h b/src/net_buffer_tuner.h index 1a89e27..83a2b22 100644 --- a/src/net_buffer_tuner.h +++ b/src/net_buffer_tuner.h @@ -23,6 +23,7 @@ enum net_buffer_tunables { NETDEV_MAX_BACKLOG, FLOW_LIMIT_CPU_BITMAP, NETDEV_BUDGET, + NETDEV_BUDGET_USECS, NET_BUFFER_NUM_TUNABLES, }; From a68df5fd7510bdae7edbf0f680ac117fd9a63047 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Tue, 3 Dec 2024 11:55:44 +0000 Subject: [PATCH 4/4] net_buffer_tuner: correlate netdev budget increases with scheduler correlate netdev budget increases with scheduler wait/run ratio; if tasks have to wait longer it is a signal the system is under load or that netdev budget is too big monopolizing CPUs. Tune down under these circumstances. Signed-off-by: Alan Maguire --- docs/bpftune-net-buffer.rst | 6 +++++ include/bpftune/libbpftune.h | 1 + src/ip_frag_tuner.c | 2 +- src/libbpftune.c | 38 ++++++++++++++++++++++++++++ src/libbpftune.map | 1 + src/net_buffer_tuner.c | 49 ++++++++++++++++++++++++++++-------- src/net_buffer_tuner.h | 1 + 7 files changed, 87 insertions(+), 11 deletions(-) diff --git a/docs/bpftune-net-buffer.rst b/docs/bpftune-net-buffer.rst index 59b6935..fcaa147 100644 --- a/docs/bpftune-net-buffer.rst +++ b/docs/bpftune-net-buffer.rst @@ -29,6 +29,12 @@ DESCRIPTION softnet stat is bumped; if we see increases in time_squeezed, bump netdev_budget/netdev_budget_usecs. + However, we want to limit such increases if they lead to longer + task scheduling wait times, so we monitor the ratio of time tasks + spend waiting versus running across all processors, and if we see + correlations between increases in netdev budget and wait/run ratio + increases, netdev budget is tuned down. 
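   The "correlation" here is a correlation coefficient (along the lines of
   Pearson's r) between successive netdev_budget values and the measured
   wait/run percentage, maintained via the corr_update_user()/corr_compute()
   helpers.  The sketch below is only to make that statistic concrete; the
   struct and function names are illustrative and do not reproduce bpftune's
   corr library::

       #include <math.h>

       struct corr_sketch {
               double n, sum_x, sum_y, sum_xy, sum_xx, sum_yy;
       };

       /* feed one (netdev_budget, wait/run %) observation */
       static void corr_sketch_update(struct corr_sketch *c, double x, double y)
       {
               c->n += 1;
               c->sum_x += x;          c->sum_y += y;
               c->sum_xy += x * y;
               c->sum_xx += x * x;     c->sum_yy += y * y;
       }

       /* close to +1 means budget and wait/run ratio rise together */
       static double corr_sketch_compute(const struct corr_sketch *c)
       {
               double num = c->n * c->sum_xy - c->sum_x * c->sum_y;
               double den = sqrt((c->n * c->sum_xx - c->sum_x * c->sum_x) *
                                 (c->n * c->sum_yy - c->sum_y * c->sum_y));

               return den > 0 ? num / den : 0;
       }

   When the computed value exceeds the tuner's threshold (CORR_THRESHOLD),
   the pending NETDEV_BUDGET_INCREASE is converted into a
   NETDEV_BUDGET_DECREASE and both netdev_budget and netdev_budget_usecs
   are shrunk instead.
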
+ Tunables: - net.core.netdev_max_backlog: maximum per-cpu backlog queue length; diff --git a/include/bpftune/libbpftune.h b/include/bpftune/libbpftune.h index 4417507..f69e4ab 100644 --- a/include/bpftune/libbpftune.h +++ b/include/bpftune/libbpftune.h @@ -292,6 +292,7 @@ void bpftune_sysctl_name_to_path(const char *name, char *path, size_t path_sz); int bpftune_sysctl_read(int netns_fd, const char *name, long *values); int bpftune_sysctl_write(int netns_fd, const char *name, __u8 num_values, long *values); int bpftune_snmpstat_read(unsigned long netns_cookie, int family, const char *name, long *value); +int bpftune_sched_wait_run_percent_read(void); bool bpftune_netns_cookie_supported(void); int bpftune_netns_set(int fd, int *orig_fd, bool quiet); int bpftune_netns_info(int pid, int *fd, unsigned long *cookie); diff --git a/src/ip_frag_tuner.c b/src/ip_frag_tuner.c index 1389a75..80e6e20 100644 --- a/src/ip_frag_tuner.c +++ b/src/ip_frag_tuner.c @@ -92,7 +92,7 @@ void event_handler(struct bpftuner *tuner, corr = corr_compute(&c); bpftune_log(LOG_DEBUG, "covar for '%s' netns %ld (new %ld): %LF ; corr %LF\n", tunable, key.netns_cookie, new, covar_compute(&c), corr); - if (corr > CORR_HIGH_THRESHOLD && scenario == IP_FRAG_THRESHOLD_INCREASE) { + if (corr > CORR_THRESHOLD && scenario == IP_FRAG_THRESHOLD_INCREASE) { scenario = IP_FRAG_THRESHOLD_DECREASE; new = BPFTUNE_SHRINK_BY_DELTA(old); } diff --git a/src/libbpftune.c b/src/libbpftune.c index 73414af..70aa89e 100644 --- a/src/libbpftune.c +++ b/src/libbpftune.c @@ -1095,6 +1095,44 @@ int bpftune_snmpstat_read(unsigned long netns_cookie, int family, return err; } +/* return % of overall wait/run time on all cpus gathered from + * /proc/schedstat ; see https://docs.kernel.org/scheduler/sched-stats.html + * Usually > 100%. + */ +int bpftune_sched_wait_run_percent_read(void) +{ + long running = 0, waiting = 0; + FILE *fp = NULL; + char line[1024]; + int err = 0; + + err = bpftune_cap_add(); + if (err) + return err; + fp = fopen("/proc/schedstat", "r"); + if (!fp) { + err = -errno; + goto out; + } + while (fgets(line, sizeof(line) - 1, fp) != NULL) { + long cpurunning = 0, cpuwaiting = 0, cputimeslices; + + if (sscanf(line, "cpu%*d %*d %*d %*d %*d %*d %*d %ld %ld %ld", + &cpurunning, &cpuwaiting, &cputimeslices) == 3) { + running += cpurunning; + waiting += cpuwaiting; + } + } + bpftune_log(LOG_DEBUG, "sched waiting %ld, running %ld\n", waiting, running); + if (running > 0) + err = (int)((waiting*100)/running); +out: + if (fp) + fclose(fp); + bpftune_cap_drop(); + return err; +} + int bpftuner_tunables_init(struct bpftuner *tuner, unsigned int num_descs, struct bpftunable_desc *descs, unsigned int num_scenarios, diff --git a/src/libbpftune.map b/src/libbpftune.map index c512f42..3f04d4c 100644 --- a/src/libbpftune.map +++ b/src/libbpftune.map @@ -51,6 +51,7 @@ LIBBPFTUNE_0.1.1 { bpftune_sysctl_read; bpftune_sysctl_write; bpftune_snmpstat_read; + bpftune_sched_wait_run_percent_read; bpftune_netns_init_all; bpftune_netns_set; bpftune_netns_info; diff --git a/src/net_buffer_tuner.c b/src/net_buffer_tuner.c index 7ae3337..49b1a20 100644 --- a/src/net_buffer_tuner.c +++ b/src/net_buffer_tuner.c @@ -2,6 +2,7 @@ /* Copyright (c) 2023, Oracle and/or its affiliates. 
*/ #include +#include #include "net_buffer_tuner.h" #include "net_buffer_tuner.skel.h" #include "net_buffer_tuner.skel.legacy.h" @@ -30,7 +31,9 @@ static struct bpftunable_scenario scenarios[] = { { FLOW_LIMIT_CPU_SET, "need to set per-cpu bitmap value", "Need to set flow limit per-cpu to prioritize small flows" }, { NETDEV_BUDGET_INCREASE, "need to increase # of packets processed per NAPI poll", - "Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs" } + "Need to increase number of packets processed across network devices during NAPI poll to use all of net.core.netdev_budget_usecs" }, +{ NETDEV_BUDGET_DECREASE, "need to decrease # of packets processed per NAPI poll", + "Need to decrease netdev_budget[_usecs] since the ratio of time spent waiting to run versus time spent running for tasks has increased as we have increased netdev budget. This indicates either our budget increases directly let to increased wait times for other tasks, or that general load has increased; either way spending too much time in NAPI processing will hurt system performance." } }; int init(struct bpftuner *tuner) @@ -77,8 +80,12 @@ void event_handler(struct bpftuner *tuner, struct bpftune_event *event, __attribute__((unused))void *ctx) { + long new, budget_usecs, budget_usecs_new; int scenario = event->scenario_id; + struct corr c = { 0 }; + long double corr = 0; const char *tunable; + struct corr_key key; int id, ret; /* netns cookie not supported; ignore */ @@ -116,28 +123,50 @@ void event_handler(struct bpftuner *tuner, event->update[0].new[0]); break; case NETDEV_BUDGET: - if (event->update[0].new[0] > INT_MAX) + new = event->update[0].new[0]; + if (new > INT_MAX) break; + budget_usecs = bpftuner_bpf_var_get(net_buffer, tuner, + netdev_budget_usecs); + budget_usecs_new = BPFTUNE_GROW_BY_DELTA(budget_usecs); + + ret = bpftune_sched_wait_run_percent_read(); + bpftune_log(LOG_DEBUG, "sched wait-run percent : %d\n", ret); + if (ret > 0) { + key.id = (__u64)id; + key.netns_cookie = event->netns_cookie; + if (corr_update_user(tuner->corr_map_fd, key.id, + key.netns_cookie, + (__u64)new, (__u64)ret)) + bpftune_log(LOG_DEBUG, "corr map fd %d update failed %d\n", + tuner->corr_map_fd, errno); + } + if (!bpf_map_lookup_elem(tuner->corr_map_fd, &key, &c)) { + corr = corr_compute(&c); + bpftune_log(LOG_DEBUG, "covar for '%s' netns %ld (new %ld): %LF; corr %LF\n", + tunable, key.netns_cookie, new, + covar_compute(&c), corr); + if (corr > CORR_THRESHOLD) { + new = BPFTUNE_SHRINK_BY_DELTA(event->update[0].old[0]); + budget_usecs_new = BPFTUNE_SHRINK_BY_DELTA(budget_usecs); + scenario = NETDEV_BUDGET_DECREASE; + } + } ret = bpftuner_tunable_sysctl_write(tuner, id, scenario, event->netns_cookie, 1, - (long int *)event->update[0].new, + (long int *)&new, "To maximize # packets processed per NAPI cycle, change %s from (%ld) -> (%ld)\n", tunable, event->update[0].old[0], - event->update[0].new[0]); + new); if (!ret) { - long budget_usecs, budget_usecs_new; - /* update value of netdev_budget for BPF program */ bpftuner_bpf_var_set(net_buffer, tuner, netdev_budget, - event->update[0].new[0]); + new); /* need to also update budget_usecs since both * limit netdev budget and reaching either limit * triggers time_squeeze. 
*/ - budget_usecs = bpftuner_bpf_var_get(net_buffer, tuner, - netdev_budget_usecs); - budget_usecs_new = BPFTUNE_GROW_BY_DELTA(budget_usecs); ret = bpftuner_tunable_sysctl_write(tuner, NETDEV_BUDGET_USECS, scenario, diff --git a/src/net_buffer_tuner.h b/src/net_buffer_tuner.h index 83a2b22..bc3cb9d 100644 --- a/src/net_buffer_tuner.h +++ b/src/net_buffer_tuner.h @@ -31,4 +31,5 @@ enum net_buffer_scenarios { NETDEV_MAX_BACKLOG_INCREASE, FLOW_LIMIT_CPU_SET, NETDEV_BUDGET_INCREASE, + NETDEV_BUDGET_DECREASE, };
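
As a quick cross-check of the behaviour this series tunes for, the per-cpu
time_squeeze counters can be read directly from /proc/net/softnet_stat.
The following minimal standalone reader is not part of the patches; it
assumes the usual column layout where the first three hex columns are
processed, dropped and time_squeeze (later columns vary by kernel version):

    #include <stdio.h>

    /* sum time_squeeze (3rd hex column of /proc/net/softnet_stat) over CPUs */
    int main(void)
    {
            unsigned int processed, dropped, squeeze, total = 0;
            char line[512];
            FILE *fp = fopen("/proc/net/softnet_stat", "r");

            if (!fp) {
                    perror("fopen /proc/net/softnet_stat");
                    return 1;
            }
            while (fgets(line, sizeof(line), fp)) {
                    if (sscanf(line, "%x %x %x",
                               &processed, &dropped, &squeeze) == 3)
                            total += squeeze;
            }
            fclose(fp);
            printf("total time_squeeze across CPUs: %u\n", total);
            return 0;
    }

Comparing the total before and after the iperf3 run in budget_test.sh shows
whether the low netdev_budget setting actually provoked squeezes,
independently of the sysctl change the test asserts on.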