static int nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
			       struct bpf_prog *prog)
{
-	struct tc_cls_bpf_offload cmd = {
-		.prog = prog,
-	};
+	bool running, xdp_running;
	int ret;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

-	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
-		if (!nn->dp.bpf_offload_xdp)
-			return prog ? -EBUSY : 0;
-		cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
-	} else {
-		if (!prog)
-			return 0;
-		cmd.command = TC_CLSBPF_ADD;
-	}
+	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
+	xdp_running = running && nn->dp.bpf_offload_xdp;
+
+	if (!prog && !xdp_running)
+		return 0;
+	if (prog && running && !xdp_running)
+		return -EBUSY;

-	ret = nfp_net_bpf_offload(nn, &cmd);
+	ret = nfp_net_bpf_offload(nn, prog, running, true);
	/* Stop offload if replace not possible */
-	if (ret && cmd.command == TC_CLSBPF_REPLACE)
+	if (ret && prog)
		nfp_bpf_xdp_offload(app, nn, NULL);
+
	nn->dp.bpf_offload_xdp = prog && !ret;
	return ret;
}
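The rewrite above trades the command-based dispatch for two booleans: `running` (some BPF offload is active) and `xdp_running` (the active offload is XDP's). A minimal standalone sketch of the resulting early-exit table; the helper name and the 1-for-proceed return convention are illustrative, not part of the driver:

#include <stdbool.h>
#include <errno.h>

/* Illustrative only: mirrors the early exits of the rewritten
 * nfp_bpf_xdp_offload(). Returns 0 for "nothing to do", -EBUSY when
 * TC cls_bpf already owns the offload, and 1 for "proceed".
 */
static int xdp_offload_check(bool prog, bool running, bool xdp_running)
{
	if (!prog && !xdp_running)
		return 0;	/* no new prog, no XDP offload: a no-op */
	if (prog && running && !xdp_running)
		return -EBUSY;	/* offload busy with a TC program */
	return 1;		/* add, replace, or destroy may proceed */
}

Since `xdp_running` implies `running`, the fall-through cases are exactly add (nothing running), replace (new prog over the XDP offload), and destroy (NULL prog with the XDP offload active).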
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
+	bool skip_sw;
+
+	if (type != TC_SETUP_CLSBPF ||
+	    !tc_can_offload(nn->dp.netdev) ||
+	    !nfp_net_ebpf_capable(nn) ||
+	    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
+	    cls_bpf->common.chain_index)
+		return -EOPNOTSUPP;
+	if (nn->dp.bpf_offload_xdp)
+		return -EBUSY;

-	if (!tc_can_offload(nn->dp.netdev))
+	/* Only support TC direct action */
+	if (!cls_bpf->exts_integrated ||
+	    tcf_exts_has_actions(cls_bpf->exts)) {
+		nn_err(nn, "only direct action with no legacy actions supported\n");
		return -EOPNOTSUPP;
+	}

-	switch (type) {
-	case TC_SETUP_CLSBPF:
-		if (!nfp_net_ebpf_capable(nn) ||
-		    cls_bpf->common.protocol != htons(ETH_P_ALL) ||
-		    cls_bpf->common.chain_index)
-			return -EOPNOTSUPP;
-		if (nn->dp.bpf_offload_xdp)
-			return -EBUSY;
-
-		/* Only support TC direct action */
-		if (!cls_bpf->exts_integrated ||
-		    tcf_exts_has_actions(cls_bpf->exts)) {
-			nn_err(nn, "only direct action with no legacy actions supported\n");
-			return -EOPNOTSUPP;
-		}
-
-		return nfp_net_bpf_offload(nn, cls_bpf);
+	skip_sw = !!(cls_bpf->gen_flags & TCA_CLS_FLAGS_SKIP_SW);
+
+	switch (cls_bpf->command) {
+	case TC_CLSBPF_REPLACE:
+		return nfp_net_bpf_offload(nn, cls_bpf->prog, true, !skip_sw);
+	case TC_CLSBPF_ADD:
+		return nfp_net_bpf_offload(nn, cls_bpf->prog, false, !skip_sw);
+	case TC_CLSBPF_DESTROY:
+		return nfp_net_bpf_offload(nn, NULL, true, !skip_sw);
	default:
		return -EOPNOTSUPP;
	}
}
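With the capability checks hoisted out of the switch, each cls_bpf command reduces to a fixed argument pattern on nfp_net_bpf_offload(). A self-contained sketch of that mapping; the enum and helper are hypothetical stand-ins for the kernel's TC_CLSBPF_* values:

#include <stdbool.h>
#include <stddef.h>

struct bpf_prog;	/* opaque for the sketch */

enum cls_cmd { CMD_REPLACE, CMD_ADD, CMD_DESTROY };

/* Translate a classifier command into the (prog, old_prog) pair the
 * unified offload entry point expects: REPLACE and ADD install the new
 * program, REPLACE and DESTROY report that an old one is loaded.
 */
static void map_cls_cmd(enum cls_cmd cmd, struct bpf_prog *new_prog,
			struct bpf_prog **prog, bool *old_prog)
{
	*prog = (cmd == CMD_DESTROY) ? NULL : new_prog;
	*old_prog = (cmd != CMD_ADD);
}

REPLACE is the only command that both installs a program and reports an old one loaded, which is what later triggers the stop-then-start sequence. The hunks that follow, starting at the `#include "../nfp_net.h"` context line, are from the offload implementation file, likely bpf/offload.c.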
#include "../nfp_net.h"
static int
-nfp_net_bpf_offload_prepare(struct nfp_net *nn,
-			    struct tc_cls_bpf_offload *cls_bpf,
+nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
			    struct nfp_bpf_result *res,
			    void **code, dma_addr_t *dma_addr, u16 max_instr)
{
	done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
-	if (cls_bpf->prog->aux->stack_depth > stack_size) {
+	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
-			cls_bpf->prog->aux->stack_depth, stack_size);
+			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}
	if (!*code)
		return -ENOMEM;

-	ret = nfp_bpf_jit(cls_bpf->prog, *code, start_off, done_off,
-			  max_instr, res);
+	ret = nfp_bpf_jit(prog, *code, start_off, done_off, max_instr, res);
	if (ret)
		goto out;
}
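The firmware reports its per-program stack budget through a byte-wide register scaled by 64, hence the `* 64` above. A standalone sketch of just that check, with hypothetical register and stack-depth values:

#include <stdio.h>

int main(void)
{
	unsigned int stack_sz_reg = 4;		/* hypothetical BPF_STACK_SZ readout */
	unsigned int stack_size = stack_sz_reg * 64;	/* FW budget in bytes */
	unsigned int stack_depth = 320;		/* hypothetical prog->aux->stack_depth */

	if (stack_depth > stack_size)
		printf("stack too large: program %uB > FW stack %uB\n",
		       stack_depth, stack_size);
	return 0;
}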
static void
-nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
+nfp_net_bpf_load_and_start(struct nfp_net *nn, bool sw_fallback,
			   void *code, dma_addr_t dma_addr,
			   unsigned int code_sz, unsigned int n_instr)
{
	int err;

-	nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
+	nn->dp.bpf_offload_skip_sw = !sw_fallback;

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
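Note the polarity flip: callers now state the capability positively (`sw_fallback`), and load_and_start derives the stored skip_sw flag from it, so `!!(gen_flags & TCA_CLS_FLAGS_SKIP_SW)` on the TC side round-trips to the same value. A trivial check of that identity:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
	/* skip_sw (TC flag) -> sw_fallback (argument) -> stored skip_sw */
	for (int skip_sw = 0; skip_sw <= 1; skip_sw++) {
		bool sw_fallback = !skip_sw;
		assert((bool)skip_sw == !sw_fallback);
	}
	return 0;
}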
-int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
+int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
+			bool old_prog, bool sw_fallback)
{
	struct nfp_bpf_result res;
	dma_addr_t dma_addr;
	void *code;
	int err;

+	/* There is nothing stopping us from implementing seamless
+	 * replace but the simple method of loading I adopted in
+	 * the firmware does not handle atomic replace (i.e. we have to
+	 * stop the BPF offload and re-enable it). Leaking-in a few
+	 * frames which didn't have BPF applied in the hardware should
+	 * be fine if software fallback is available, though.
+	 */
+	if (prog && old_prog && nn->dp.bpf_offload_skip_sw)
+		return -EBUSY;
+
+	/* Something else is loaded, different program type? */
+	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
+		return -EBUSY;
+
	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
+	code = NULL;

-	switch (cls_bpf->command) {
-	case TC_CLSBPF_REPLACE:
-		/* There is nothing stopping us from implementing seamless
-		 * replace but the simple method of loading I adopted in
-		 * the firmware does not handle atomic replace (i.e. we have to
-		 * stop the BPF offload and re-enable it). Leaking-in a few
-		 * frames which didn't have BPF applied in the hardware should
-		 * be fine if software fallback is available, though.
-		 */
-		if (nn->dp.bpf_offload_skip_sw)
-			return -EBUSY;
-
-		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
+	if (prog) {
+		err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;
+	}

+	if (old_prog)
		nfp_net_bpf_stop(nn);
-	nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
-				   dma_addr, max_instr * sizeof(u64),
-				   res.n_instr);
-	return 0;
-
-	case TC_CLSBPF_ADD:
-		if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
-			return -EBUSY;
-
-		err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
-						  &dma_addr, max_instr);
-		if (err)
-			return err;
-		nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
+	if (prog)
+		nfp_net_bpf_load_and_start(nn, sw_fallback, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr);
-	return 0;
-	case TC_CLSBPF_DESTROY:
-		return nfp_net_bpf_stop(nn);
-
-	default:
-		return -EOPNOTSUPP;
-	}
+	return 0;
}
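Taken together, the callers in the earlier hunks exercise the unified entry point in exactly four shapes; summarized schematically (arguments as in the hunks above, not compilable outside the driver):

/* Call shapes after this patch:
 *
 *   TC add:      nfp_net_bpf_offload(nn, cls_bpf->prog, false, !skip_sw);
 *   TC replace:  nfp_net_bpf_offload(nn, cls_bpf->prog, true,  !skip_sw);
 *   TC destroy:  nfp_net_bpf_offload(nn, NULL,          true,  !skip_sw);
 *   XDP:         nfp_net_bpf_offload(nn, prog, running, true);
 *
 * The XDP path always passes sw_fallback = true, since the kernel can
 * run the program in the driver if the hardware path is interrupted.
 */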