diff --git a/src/net_buffer_tuner.bpf.c b/src/net_buffer_tuner.bpf.c
index d7eb005..bef6f0e 100644
--- a/src/net_buffer_tuner.bpf.c
+++ b/src/net_buffer_tuner.bpf.c
@@ -20,8 +20,6 @@
 #include <bpftune/bpftune.bpf.h>
 #include "net_buffer_tuner.h"
 
-extern const void netdev_max_backlog __ksym;
-
 #ifndef NET_RX_DROP
 #define NET_RX_DROP 1
 #endif
@@ -31,6 +29,8 @@
 __u64 drop_interval_start = 0;
 __u64 flow_limit_cpu_bitmap = 0;
 
+int netdev_max_backlog = 0;
+
 #ifdef BPFTUNE_LEGACY
 SEC("kretprobe/enqueue_to_backlog")
 int BPF_KRETPROBE(bpftune_enqueue_to_backlog, int ret)
@@ -40,9 +40,9 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu,
 	     unsigned int *qtail, int ret)
 #endif
 {
+	int max_backlog = netdev_max_backlog;
 	struct bpftune_event event = { 0 };
 	long old[3], new[3];
-	int max_backlog, *max_backlogp = (int *)&netdev_max_backlog;
 	__u64 time, cpubit;
 
 	/* a high-frequency event so bail early if we can... */
@@ -54,10 +54,7 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu,
 	/* only sample subset of drops to reduce overhead. */
 	if ((drop_count % 4) != 0)
 		return 0;
-	if (bpf_probe_read_kernel(&max_backlog, sizeof(max_backlog),
-				  max_backlogp))
-		return 0;
-
+
 	/* if we drop more than 1/16 of the backlog queue size/min,
 	 * increase backlog queue size.  This means as the queue size
 	 * increases, the likliehood of hitting that limit decreases.
diff --git a/src/net_buffer_tuner.c b/src/net_buffer_tuner.c
index 7f84f9a..359af05 100644
--- a/src/net_buffer_tuner.c
+++ b/src/net_buffer_tuner.c
@@ -29,10 +29,11 @@ static struct bpftunable_scenario scenarios[] = {
 int init(struct bpftuner *tuner)
 {
 	long cpu_bitmap = 0;
+	long max_backlog = 0;
 	int err;
 
 	bpftune_sysctl_read(0, "net.core.flow_limit_cpu_bitmap", &cpu_bitmap);
-
+	bpftune_sysctl_read(0, "net.core.netdev_max_backlog", &max_backlog);
 	err = bpftuner_bpf_open(net_buffer, tuner);
 	if (err)
 		return err;
@@ -41,6 +42,8 @@ int init(struct bpftuner *tuner)
 		return err;
 	bpftuner_bpf_var_set(net_buffer, tuner, flow_limit_cpu_bitmap,
 			     cpu_bitmap);
+	bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog,
+			     max_backlog);
 	err = bpftuner_bpf_attach(net_buffer, tuner);
 	if (err)
 		return err;
@@ -82,6 +85,9 @@ void event_handler(struct bpftuner *tuner,
 					      tunable,
 					      event->update[0].old[0],
 					      event->update[0].new[0]);
+		/* update value of netdev_max_backlog for BPF program */
+		bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog,
+				     event->update[0].new[0]);
 		break;
 	case FLOW_LIMIT_CPU_BITMAP:
 		bpftuner_tunable_sysctl_write(tuner, id, scenario,
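
Note on the approach: the patch replaces a __ksym + bpf_probe_read_kernel() read of the kernel's netdev_max_backlog with a plain global variable in the BPF object that userspace keeps in sync from the sysctl value. The sketch below is not bpftune code; it is a minimal illustration of the generic libbpf skeleton pattern assumed to underlie bpftuner_bpf_var_set(): a zero-initialized global lands in the object's .bss map, which the skeleton exposes for writes both before load (initial value) and after load (libbpf mmaps the map, so updates reach the attached program). The skeleton header and function names are hypothetical.

/* Minimal sketch, assuming a bpftool-generated skeleton named
 * net_buffer_tuner; header/function names below are hypothetical,
 * not bpftune's actual build artifacts.
 */
#include "net_buffer_tuner.skel.h"	/* hypothetical skeleton header */

int main(void)
{
	struct net_buffer_tuner *skel;
	int err;

	skel = net_buffer_tuner__open();
	if (!skel)
		return 1;

	/* "int netdev_max_backlog = 0;" is zero-initialized, so it lives in
	 * the .bss map; assigning through the skeleton sets the value the
	 * BPF program will read once loaded.
	 */
	skel->bss->netdev_max_backlog = 1000;

	err = net_buffer_tuner__load(skel);
	if (!err)
		err = net_buffer_tuner__attach(skel);

	/* libbpf mmaps the .bss map at load time, so a later write (e.g.
	 * after net.core.netdev_max_backlog is raised) is visible to the
	 * already-attached program without a reload.
	 */
	if (!err)
		skel->bss->netdev_max_backlog = 2000;

	net_buffer_tuner__destroy(skel);
	return err ? 1 : 0;
}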