src/net_buffer_tuner.bpf.c (11 changes: 4 additions & 7 deletions)
@@ -20,8 +20,6 @@
#include <bpftune/bpftune.bpf.h>
#include "net_buffer_tuner.h"

extern const void netdev_max_backlog __ksym;

#ifndef NET_RX_DROP
#define NET_RX_DROP 1
#endif
@@ -31,6 +29,8 @@ __u64 drop_interval_start = 0;

__u64 flow_limit_cpu_bitmap = 0;

int netdev_max_backlog = 0;

#ifdef BPFTUNE_LEGACY
SEC("kretprobe/enqueue_to_backlog")
int BPF_KRETPROBE(bpftune_enqueue_to_backlog, int ret)
@@ -40,9 +40,9 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu,
unsigned int *qtail, int ret)
#endif
{
int max_backlog = netdev_max_backlog;
struct bpftune_event event = { 0 };
long old[3], new[3];
int max_backlog, *max_backlogp = (int *)&netdev_max_backlog;
__u64 time, cpubit;

/* a high-frequency event so bail early if we can... */
@@ -54,10 +54,7 @@ int BPF_PROG(bpftune_enqueue_to_backlog, struct sk_buff *skb, int cpu,
/* only sample subset of drops to reduce overhead. */
if ((drop_count % 4) != 0)
return 0;
if (bpf_probe_read_kernel(&max_backlog, sizeof(max_backlog),
max_backlogp))
return 0;


/* if we drop more than 1/16 of the backlog queue size/min,
* increase backlog queue size. This means as the queue size
* increases, the likelihood of hitting that limit decreases.
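The BPF-side change replaces the __ksym reference and the per-event bpf_probe_read_kernel() call with an ordinary BPF global variable: the program now reads netdev_max_backlog directly, and userspace becomes responsible for keeping that global in sync with the sysctl. The sketch below illustrates the general pattern as a standalone program; the program and attach names are assumptions for illustration, not the tuner's actual code, and the drops-versus-queue-size comparison is elided here just as it is in the diff.

/* sketch.bpf.c - illustrative only; program and variable names are assumptions */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* global variable in the object's .bss map; userspace writes it */
int netdev_max_backlog = 0;

SEC("kretprobe/enqueue_to_backlog")
int BPF_KRETPROBE(sample_backlog, int ret)
{
	/* direct read of the global -- no bpf_probe_read_kernel() needed */
	int max_backlog = netdev_max_backlog;

	if (max_backlog == 0)
		return 0;	/* userspace has not populated the value yet */

	/* ...count drops and compare against a fraction of max_backlog... */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

A global like this compiles down to a plain load from the object's data map, so the hot path sampled on every backlog drop no longer pays for a helper call.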
src/net_buffer_tuner.c (8 changes: 7 additions & 1 deletion)
@@ -29,10 +29,11 @@ static struct bpftunable_scenario scenarios[] = {
int init(struct bpftuner *tuner)
{
long cpu_bitmap = 0;
long max_backlog = 0;
int err;

bpftune_sysctl_read(0, "net.core.flow_limit_cpu_bitmap", &cpu_bitmap);

bpftune_sysctl_read(0, "net.core.netdev_max_backlog", &max_backlog);
err = bpftuner_bpf_open(net_buffer, tuner);
if (err)
return err;
@@ -41,6 +42,8 @@ int init(struct bpftuner *tuner)
return err;
bpftuner_bpf_var_set(net_buffer, tuner, flow_limit_cpu_bitmap,
cpu_bitmap);
bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog,
max_backlog);
err = bpftuner_bpf_attach(net_buffer, tuner);
if (err)
return err;
@@ -82,6 +85,9 @@ void event_handler(struct bpftuner *tuner,
tunable,
event->update[0].old[0],
event->update[0].new[0]);
/* update value of netdev_max_backlog for BPF program */
bpftuner_bpf_var_set(net_buffer, tuner, netdev_max_backlog,
event->update[0].new[0]);
break;
case FLOW_LIMIT_CPU_BITMAP:
bpftuner_tunable_sysctl_write(tuner, id, scenario,
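On the userspace side, the tuner reads net.core.netdev_max_backlog at init, pushes it into the BPF object with bpftuner_bpf_var_set() before attach, and repeats that write in event_handler() whenever the backlog tunable is updated, so the BPF program's copy does not go stale. With a plain libbpf skeleton the same idea looks roughly like the sketch below; the skeleton and file names are hypothetical, and bpftune's bpftuner_bpf_var_set() macro presumably wraps equivalent skeleton access.

/* user.c - illustrative only; skeleton name and layout are assumptions */
#include <stdio.h>
#include "sample.skel.h"		/* hypothetical libbpf skeleton header */

static long read_netdev_max_backlog(void)
{
	long val = 0;
	FILE *f = fopen("/proc/sys/net/core/netdev_max_backlog", "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	struct sample_bpf *skel = sample_bpf__open_and_load();

	if (!skel)
		return 1;

	/* globals live in a memory-mapped .bss map, so assigning the skeleton
	 * field here is what the BPF program sees on its next read */
	skel->bss->netdev_max_backlog = (int)read_netdev_max_backlog();

	/* ...attach programs, poll for events, and rewrite the global whenever
	 * the sysctl value changes... */
	sample_bpf__destroy(skel);
	return 0;
}

Moving the read to userspace trades a small window of staleness (until the next bpftuner_bpf_var_set() call) for removing a kernel-memory read from a high-frequency BPF path, which appears to be the motivation for the change.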