mirror of https://github.com/torvalds/linux.git
bpf-fixes
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEE+soXsSLHKoYyzcli6rmadz2vbToFAmjB/6gACgkQ6rmadz2v
bToSCg//Z8Q2ToTV/BOLTzFYLvcTm2YRlqqIe3SFxyxLQCIhC0kxQAT94baVQHky
/6ASbPjDWXdGVHNoMopA6lpMx22Tq4xi6qO5fzJHDuSqh5KTi8l5/GyJeA3egPzD
7RIvKvvgePpCx0xm9rm5O5vvUeFrsxhQPRRiN/fsOibiTJjBpRAJDp9k+pvnK6mb
HaZcHF+In5Vg7XozuHAUMzsp+4njzdLrMXL2Q54o2MrIoeBg8/oAnhLujskGMnXK
mgUA+skW42IEkw+TYUu9888/5PMDkto3BZIx0plcAIVAIvcU5BFzLt11llQswgVl
q740k50oRKrmwHyEVDwugV7WeGQMks48lMHtLKytYmdEhdTfEYUKHeBpcI87fUYy
IpOdSUT49nBxOmGl59ccBcdzsndTjo7Zrl7dMf4umN0SSjfdohwj0uu7rmZCaOdd
m/TxH13Ae7na4QzVx0N911qxBYw07uYNiq3Ati+x327ySozvvNfLIYK/sS/clJkd
lOpz3kpjwgV+PUfv2NBqEJm4nSPTtW7fiEQ8p/yBvK90nB6NnHIbq9a2rPBKeDKx
RpkDB9nhJ1J6OMKWkUDasMiP5tAXt9RrI+la/CgBxMcP/G4HxH6yf22Mf3Hzhe8V
UfjAgHqXvmrjqpgIbO0AVkfwDvlOM37DGvY0H3bMFeOXCk0DDw4=
=09/9
-----END PGP SIGNATURE-----
Merge tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Pull bpf fixes from Alexei Starovoitov:
"A number of fixes accumulated due to summer vacations
- Fix out-of-bounds dynptr write in bpf_crypto_crypt() kfunc which
was misidentified as a security issue (Daniel Borkmann)
- Update the list of BPF selftests maintainers (Eduard Zingerman)
- Fix selftests warnings with icecc compiler (Ilya Leoshkevich)
- Disable XDP/cpumap direct return optimization (Jesper Dangaard
Brouer)
- Fix unexpected get_helper_proto() result in unusual configuration
BPF_SYSCALL=y and BPF_EVENTS=n (Jiri Olsa)
- Allow fallback to interpreter when JIT support is limited (KaFai
Wan)
 - Fix rqspinlock by choosing the trylock fallback for NMI waiters; this
   is the simplest fix, and a more involved one is targeted at bpf-next
   (Kumar Kartikeya Dwivedi)
- Fix cleanup when tcp_bpf_send_verdict() fails to allocate
psock->cork (Kuniyuki Iwashima)
 - Disallow bpf_timer on PREEMPT_RT kernels for now; a proper solution is
   being discussed for bpf-next (Leon Hwang)
- Fix XSK cq descriptor production (Maciej Fijalkowski)
- Tell memcg to use allow_spinning=false path in bpf_timer_init() to
avoid lockup in cgroup_file_notify() (Peilin Ye)
- Fix bpf_strnstr() to handle suffix match cases (Rong Tao)"
* tag 'bpf-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
selftests/bpf: Skip timer cases when bpf_timer is not supported
bpf: Reject bpf_timer for PREEMPT_RT
tcp_bpf: Call sk_msg_free() when tcp_bpf_send_verdict() fails to allocate psock->cork.
bpf: Tell memcg to use allow_spinning=false path in bpf_timer_init()
bpf: Allow fall back to interpreter for programs with stack size <= 512
rqspinlock: Choose trylock fallback for NMI waiters
xsk: Fix immature cq descriptor production
bpf: Update the list of BPF selftests maintainers
selftests/bpf: Add tests for bpf_strnstr
selftests/bpf: Fix "expression result unused" warnings with icecc
bpf: Fix bpf_strnstr() to handle suffix match cases better
selftests/bpf: Extend crypto_sanity selftest with invalid dst buffer
bpf: Fix out-of-bounds dynptr write in bpf_crypto_crypt
bpf: Check the helper function is valid in get_helper_proto
bpf, cpumap: Disable page_pool direct xdp_return need larger scope
commit 02ffd6f89c
@@ -4683,7 +4683,6 @@ F: security/bpf/
BPF [SELFTESTS] (Test Runners & Infrastructure)
M: Andrii Nakryiko <andrii@kernel.org>
M: Eduard Zingerman <eddyz87@gmail.com>
R: Mykola Lysenko <mykolal@fb.com>
L: bpf@vger.kernel.org
S: Maintained
F: tools/testing/selftests/bpf/

@@ -62,3 +62,4 @@ CFLAGS_REMOVE_bpf_lru_list.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_queue_stack_maps.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_lpm_trie.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_ringbuf.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rqspinlock.o = $(CC_FLAGS_FTRACE)

@@ -2366,8 +2366,7 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
					 const struct bpf_insn *insn)
{
	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
	 * is not working properly, or interpreter is being used when
	 * prog->jit_requested is not 0, so warn about it!
	 * is not working properly, so warn about it!
	 */
	WARN_ON_ONCE(1);
	return 0;

@@ -2468,8 +2467,9 @@ out:
	return ret;
}

static void bpf_prog_select_func(struct bpf_prog *fp)
static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
{
	bool select_interpreter = false;
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
	u32 idx = (round_up(stack_depth, 32) / 32) - 1;

@@ -2478,15 +2478,16 @@ static void bpf_prog_select_func(struct bpf_prog *fp)
	 * But for non-JITed programs, we don't need bpf_func, so no bounds
	 * check needed.
	 */
	if (!fp->jit_requested &&
	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
	if (idx < ARRAY_SIZE(interpreters)) {
		fp->bpf_func = interpreters[idx];
		select_interpreter = true;
	} else {
		fp->bpf_func = __bpf_prog_ret0_warn;
	}
#else
	fp->bpf_func = __bpf_prog_ret0_warn;
#endif
	return select_interpreter;
}

/**

@@ -2505,7 +2506,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
	/* In case of BPF to BPF calls, verifier did all the prep
	 * work with regards to JITing, etc.
	 */
	bool jit_needed = fp->jit_requested;
	bool jit_needed = false;

	if (fp->bpf_func)
		goto finalize;

@@ -2514,7 +2515,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
	    bpf_prog_has_kfunc_call(fp))
		jit_needed = true;

	bpf_prog_select_func(fp);
	if (!bpf_prog_select_interpreter(fp))
		jit_needed = true;

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during

@@ -3024,7 +3026,10 @@ EXPORT_SYMBOL_GPL(bpf_event_output);

/* Always built-in helper functions. */
const struct bpf_func_proto bpf_tail_call_proto = {
	.func = NULL,
	/* func is unused for tail_call, we set it to pass the
	 * get_helper_proto check
	 */
	.func = BPF_PTR_POISON,
	.gpl_only = false,
	.ret_type = RET_VOID,
	.arg1_type = ARG_PTR_TO_CTX,

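For context on the fallback above: bpf_prog_select_interpreter() picks an entry from the interpreters[] table using the program's stack depth rounded up to a 32-byte bucket, and the commit subject caps eligibility at a 512-byte stack. A minimal userspace sketch of that index math (a sketch only; the 16-entry table size is inferred from the 512-byte limit and is not shown in this hunk):

#include <stdio.h>

/* Same rounding the kernel's round_up() performs for power-of-two alignment. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	/* The kernel clamps stack_depth to at least 1 before this calculation. */
	unsigned int depths[] = { 1, 32, 33, 256, 512, 513 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
		unsigned int idx = (round_up(depths[i], 32) / 32) - 1;

		printf("stack_depth=%3u -> interpreters[%2u]%s\n",
		       depths[i], idx,
		       idx < 16 ? "" : " (out of range, no interpreter fallback)");
	}
	return 0;
}
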
@@ -186,7 +186,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {

@@ -231,7 +230,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
		}
	}

	xdp_clear_return_frame_no_direct();
	stats->pass += nframes;

	return nframes;

@@ -255,6 +253,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,

	rcu_read_lock();
	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	xdp_set_return_frame_no_direct();

	ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats);
	if (unlikely(ret->skb_n))

@@ -264,6 +263,7 @@ static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
	if (stats->redirect)
		xdp_do_flush();

	xdp_clear_return_frame_no_direct();
	bpf_net_ctx_clear(bpf_net_ctx);
	rcu_read_unlock();

@@ -278,7 +278,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
	siv_len = siv ? __bpf_dynptr_size(siv) : 0;
	src_len = __bpf_dynptr_size(src);
	dst_len = __bpf_dynptr_size(dst);
	if (!src_len || !dst_len)
	if (!src_len || !dst_len || src_len > dst_len)
		return -EINVAL;

	if (siv_len != ctx->siv_len)

@@ -1274,8 +1274,11 @@ static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u
		goto out;
	}

	/* allocate hrtimer via map_kmalloc to use memcg accounting */
	cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node);
	/* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
	 * kmalloc_nolock() is available, avoid locking issues by using
	 * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
	 */
	cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
	if (!cb) {
		ret = -ENOMEM;
		goto out;

@@ -3664,10 +3667,17 @@ __bpf_kfunc int bpf_strnstr(const char *s1__ign, const char *s2__ign, size_t len

	guard(pagefault)();
	for (i = 0; i < XATTR_SIZE_MAX; i++) {
		for (j = 0; i + j < len && j < XATTR_SIZE_MAX; j++) {
		for (j = 0; i + j <= len && j < XATTR_SIZE_MAX; j++) {
			__get_kernel_nofault(&c2, s2__ign + j, char, err_out);
			if (c2 == '\0')
				return i;
			/*
			 * We allow reading an extra byte from s2 (note the
			 * `i + j <= len` above) to cover the case when s2 is
			 * a suffix of the first len chars of s1.
			 */
			if (i + j == len)
				break;
			__get_kernel_nofault(&c1, s1__ign + j, char, err_out);
			if (c1 == '\0')
				return -ENOENT;

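The suffix case described in the comment above is easiest to see in a plain userspace model of the same scan. This sketch only mirrors the fixed loop bound (`i + j <= len`); it is not the kernel implementation, and the sample strings match the selftests added later in this series:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Model of bpf_strnstr(): find s2 within the first len chars of s1,
 * returning the match offset or -ENOENT. */
static long strnstr_model(const char *s1, const char *s2, size_t len)
{
	for (size_t i = 0; i <= len; i++) {
		size_t j = 0;

		/* Note '<=': j may reach len - i, so an s2 that ends exactly
		 * at the len boundary (a suffix match) is still accepted. */
		while (i + j <= len) {
			if (s2[j] == '\0')
				return (long)i;  /* all of s2 matched at i */
			if (i + j == len)
				break;           /* s1 budget exhausted */
			if (s1[i + j] == '\0')
				return -ENOENT;  /* hit the end of s1 */
			if (s1[i + j] != s2[j])
				break;           /* mismatch, try next offset */
			j++;
		}
		if (i + j >= len)
			return -ENOENT;          /* no room for further tries */
	}
	return -ENOENT;
}

int main(void)
{
	/* "lo" is a suffix of the first 5 chars of "hello world". */
	printf("%ld\n", strnstr_model("hello world", "lo", 5));    /* 3 */
	printf("%ld\n", strnstr_model("hello world", "hello", 5)); /* 0 */
	printf("%ld\n", strnstr_model("hello world", "hello", 4)); /* -ENOENT */
	return 0;
}
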
@@ -471,7 +471,7 @@ queue:
	 * any MCS node. This is not the most elegant solution, but is
	 * simple enough.
	 */
	if (unlikely(idx >= _Q_MAX_NODES)) {
	if (unlikely(idx >= _Q_MAX_NODES || in_nmi())) {
		lockevent_inc(lock_no_node);
		RES_RESET_TIMEOUT(ts, RES_DEF_TIMEOUT);
		while (!queued_spin_trylock(lock)) {

@@ -8547,6 +8547,10 @@ static int process_timer_func(struct bpf_verifier_env *env, int regno,
		verifier_bug(env, "Two map pointers in a timer helper");
		return -EFAULT;
	}
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		verbose(env, "bpf_timer cannot be used for PREEMPT_RT.\n");
		return -EOPNOTSUPP;
	}
	meta->map_uid = reg->map_uid;
	meta->map_ptr = map;
	return 0;

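With this check in place, any program that hands a struct bpf_timer to the timer helpers is refused at load time on PREEMPT_RT kernels, which is what the EOPNOTSUPP-based skips added to the selftests below key off. A minimal sketch of the kind of program that is now rejected (map, section and function names here are illustrative, not taken from this series):

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

#define CLOCK_MONOTONIC 1

struct elem {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timer_map SEC(".maps");

static int timer_cb(void *map, int *key, struct bpf_timer *timer)
{
	return 0;
}

SEC("fentry/bpf_fentry_test1")
int start_timer(void *ctx)
{
	struct elem *val;
	int key = 0;

	val = bpf_map_lookup_elem(&timer_map, &key);
	if (!val)
		return 0;

	/* On PREEMPT_RT the verifier now fails the load with -EOPNOTSUPP when
	 * it checks the timer argument of the first helper call below. */
	bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->timer, timer_cb);
	bpf_timer_start(&val->timer, 1000 /* ns */, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";
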
@@ -11354,7 +11358,7 @@ static int get_helper_proto(struct bpf_verifier_env *env, int func_id,
		return -EINVAL;

	*ptr = env->ops->get_func_proto(func_id, env->prog);
	return *ptr ? 0 : -EINVAL;
	return *ptr && (*ptr)->func ? 0 : -EINVAL;
}

static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,

@@ -408,8 +408,11 @@ more_data:
	if (!psock->cork) {
		psock->cork = kzalloc(sizeof(*psock->cork),
				      GFP_ATOMIC | __GFP_NOWARN);
		if (!psock->cork)
		if (!psock->cork) {
			sk_msg_free(sk, msg);
			*copied = 0;
			return -ENOMEM;
		}
	}
	memcpy(psock->cork, msg, sizeof(*msg));
	return 0;

net/xdp/xsk.c: 113 lines changed

@@ -36,6 +36,20 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET 32

struct xsk_addr_node {
	u64 addr;
	struct list_head addr_node;
};

struct xsk_addr_head {
	u32 num_descs;
	struct list_head addrs_list;
};

static struct kmem_cache *xsk_tx_generic_cache;

#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)

@@ -532,24 +546,43 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static int xsk_cq_reserve_addr_locked(struct xsk_buff_pool *pool, u64 addr)
static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pool->cq_lock, flags);
	ret = xskq_prod_reserve_addr(pool->cq, addr);
	ret = xskq_prod_reserve(pool->cq);
	spin_unlock_irqrestore(&pool->cq_lock, flags);

	return ret;
}

static void xsk_cq_submit_locked(struct xsk_buff_pool *pool, u32 n)
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
				      struct sk_buff *skb)
{
	struct xsk_addr_node *pos, *tmp;
	u32 descs_processed = 0;
	unsigned long flags;
	u32 idx;

	spin_lock_irqsave(&pool->cq_lock, flags);
	xskq_prod_submit_n(pool->cq, n);
	idx = xskq_get_prod(pool->cq);

	xskq_prod_write_addr(pool->cq, idx,
			     (u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
	descs_processed++;

	if (unlikely(XSKCB(skb)->num_descs > 1)) {
		list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
			xskq_prod_write_addr(pool->cq, idx + descs_processed,
					     pos->addr);
			descs_processed++;
			list_del(&pos->addr_node);
			kmem_cache_free(xsk_tx_generic_cache, pos);
		}
	}
	xskq_prod_submit_n(pool->cq, descs_processed);
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}

@@ -562,9 +595,14 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
	spin_unlock_irqrestore(&pool->cq_lock, flags);
}

static void xsk_inc_num_desc(struct sk_buff *skb)
{
	XSKCB(skb)->num_descs++;
}

static u32 xsk_get_num_desc(struct sk_buff *skb)
{
	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
	return XSKCB(skb)->num_descs;
}

static void xsk_destruct_skb(struct sk_buff *skb)

@@ -576,23 +614,33 @@ static void xsk_destruct_skb(struct sk_buff *skb)
		*compl->tx_timestamp = ktime_get_tai_fast_ns();
	}

	xsk_cq_submit_locked(xdp_sk(skb->sk)->pool, xsk_get_num_desc(skb));
	xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
	sock_wfree(skb);
}

static void xsk_set_destructor_arg(struct sk_buff *skb)
static void xsk_set_destructor_arg(struct sk_buff *skb, u64 addr)
{
	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;

	skb_shinfo(skb)->destructor_arg = (void *)num;
	BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
	INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
	XSKCB(skb)->num_descs = 0;
	skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
}

static void xsk_consume_skb(struct sk_buff *skb)
{
	struct xdp_sock *xs = xdp_sk(skb->sk);
	u32 num_descs = xsk_get_num_desc(skb);
	struct xsk_addr_node *pos, *tmp;

	if (unlikely(num_descs > 1)) {
		list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
			list_del(&pos->addr_node);
			kmem_cache_free(xsk_tx_generic_cache, pos);
		}
	}

	skb->destructor = sock_wfree;
	xsk_cq_cancel_locked(xs->pool, xsk_get_num_desc(skb));
	xsk_cq_cancel_locked(xs->pool, num_descs);
	/* Free skb without triggering the perf drop trace */
	consume_skb(skb);
	xs->skb = NULL;

@@ -609,6 +657,7 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct xsk_addr_node *xsk_addr;
	struct sk_buff *skb = xs->skb;
	struct page *page;
	void *buffer;

@@ -623,6 +672,19 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
			return ERR_PTR(err);

		skb_reserve(skb, hr);

		xsk_set_destructor_arg(skb, desc->addr);
	} else {
		xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
		if (!xsk_addr)
			return ERR_PTR(-ENOMEM);

		/* in case of -EOVERFLOW that could happen below,
		 * xsk_consume_skb() will release this node as whole skb
		 * would be dropped, which implies freeing all list elements
		 */
		xsk_addr->addr = desc->addr;
		list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
	}

	addr = desc->addr;

@@ -694,8 +756,11 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err))
			goto free_err;

		xsk_set_destructor_arg(skb, desc->addr);
	} else {
		int nr_frags = skb_shinfo(skb)->nr_frags;
		struct xsk_addr_node *xsk_addr;
		struct page *page;
		u8 *vaddr;

@@ -710,12 +775,22 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
			goto free_err;
		}

		xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
		if (!xsk_addr) {
			__free_page(page);
			err = -ENOMEM;
			goto free_err;
		}

		vaddr = kmap_local_page(page);
		memcpy(vaddr, buffer, len);
		kunmap_local(vaddr);

		skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
		refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);

		xsk_addr->addr = desc->addr;
		list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
	}

	if (first_frag && desc->options & XDP_TX_METADATA) {

@@ -759,7 +834,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb->destructor = xsk_destruct_skb;
	xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
	xsk_set_destructor_arg(skb);
	xsk_inc_num_desc(skb);

	return skb;

@@ -769,7 +844,7 @@ free_err:

	if (err == -EOVERFLOW) {
		/* Drop the packet */
		xsk_set_destructor_arg(xs->skb);
		xsk_inc_num_desc(xs->skb);
		xsk_drop_skb(xs->skb);
		xskq_cons_release(xs->tx);
	} else {

@@ -812,7 +887,7 @@ static int __xsk_generic_xmit(struct sock *sk)
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		err = xsk_cq_reserve_addr_locked(xs->pool, desc.addr);
		err = xsk_cq_reserve_locked(xs->pool);
		if (err) {
			err = -EAGAIN;
			goto out;

@@ -1815,8 +1890,18 @@ static int __init xsk_init(void)
	if (err)
		goto out_pernet;

	xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
						 sizeof(struct xsk_addr_node),
						 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!xsk_tx_generic_cache) {
		err = -ENOMEM;
		goto out_unreg_notif;
	}

	return 0;

out_unreg_notif:
	unregister_netdevice_notifier(&xsk_netdev_notifier);
out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:

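The new bookkeeping above hangs a small header off every skb built by the generic xmit path: the first descriptor address lives in destructor_arg, later addresses go on a list anchored in the skb control block, and the completion queue entries are only written with those addresses at destruction time. A stripped-down userspace model of the cb-as-struct pattern (the 48-byte cb size and the field names mirror the patch; everything else is scaffolding):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct fake_skb {
	char cb[48];            /* sk_buff::cb is 48 bytes */
	void *destructor_arg;   /* stands in for skb_shinfo(skb)->destructor_arg */
};

/* Same layout the patch overlays on skb->cb via XSKCB(). */
struct xsk_addr_head {
	unsigned int num_descs;
	struct list_head addrs_list;
};

#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))

int main(void)
{
	struct fake_skb skb = { { 0 } };

	/* Mirrors the BUILD_BUG_ON() in xsk_set_destructor_arg(). */
	_Static_assert(sizeof(struct xsk_addr_head) <= sizeof(skb.cb),
		       "xsk_addr_head must fit in skb->cb");

	/* First descriptor: keep its address in destructor_arg, start the
	 * list empty, and bump the count once the frag is committed. */
	XSKCB(&skb)->addrs_list.next = &XSKCB(&skb)->addrs_list;
	XSKCB(&skb)->addrs_list.prev = &XSKCB(&skb)->addrs_list;
	XSKCB(&skb)->num_descs = 0;
	skb.destructor_arg = (void *)(unsigned long)0x181000; /* made-up addr */

	XSKCB(&skb)->num_descs++;	/* what xsk_inc_num_desc() does */

	printf("num_descs=%u first_addr=%#lx\n", XSKCB(&skb)->num_descs,
	       (unsigned long)skb.destructor_arg);
	return 0;
}
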
@@ -344,6 +344,11 @@ static inline u32 xskq_cons_present_entries(struct xsk_queue *q)

/* Functions for producers */

static inline u32 xskq_get_prod(struct xsk_queue *q)
{
	return READ_ONCE(q->ring->producer);
}

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

@@ -390,6 +395,13 @@ static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
	return 0;
}

static inline void xskq_prod_write_addr(struct xsk_queue *q, u32 idx, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	ring->desc[idx & q->ring_mask] = addr;
}

static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
					      u32 nb_entries)
{

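xskq_prod_write_addr() above writes at an explicit producer index instead of the cached one, relying on the usual power-of-two ring masking so late writes (issued from the skb destructor) land in the right slot. A toy userspace sketch of that masking, with a made-up ring size and addresses:

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static unsigned long long ring[RING_SIZE];

/* Analogue of ring->desc[idx & q->ring_mask] = addr. */
static void prod_write(unsigned int idx, unsigned long long addr)
{
	ring[idx & RING_MASK] = addr;
}

int main(void)
{
	unsigned int prod = 6;		/* pretend producer index near the end */

	/* Three writes wrap transparently thanks to the mask. */
	for (unsigned int i = 0; i < 3; i++)
		prod_write(prod + i, 0x1000ull + 0x100ull * i);

	for (unsigned int i = 0; i < RING_SIZE; i++)
		printf("slot %u: %#llx\n", i, ring[i]);
	return 0;
}
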
@@ -124,6 +124,10 @@ void test_free_timer(void)
	int err;

	skel = free_timer__open_and_load();
	if (!skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(skel, "open_load"))
		return;

@@ -86,6 +86,10 @@ void serial_test_timer(void)
	int err;

	timer_skel = timer__open_and_load();
	if (!timer_skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
		return;

@@ -12,6 +12,10 @@ static void test_timer_crash_mode(int mode)
	struct timer_crash *skel;

	skel = timer_crash__open_and_load();
	if (!skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
		return;
	skel->bss->pid = getpid();

@@ -59,6 +59,10 @@ void test_timer_lockup(void)
	}

	skel = timer_lockup__open_and_load();
	if (!skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(skel, "timer_lockup__open_and_load"))
		return;

@@ -65,6 +65,10 @@ void serial_test_timer_mim(void)
		goto cleanup;

	timer_skel = timer_mim__open_and_load();
	if (!timer_skel && errno == EOPNOTSUPP) {
		test__skip();
		return;
	}
	if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
		goto cleanup;

@@ -302,7 +302,7 @@ int arena_spin_lock_slowpath(arena_spinlock_t __arena __arg_arena *lock, u32 val
	 * barriers.
	 */
	if (val & _Q_LOCKED_MASK)
		smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);
		(void)smp_cond_load_acquire_label(&lock->locked, !VAL, release_err);

	/*
	 * take ownership and clear the pending bit.

@@ -380,7 +380,7 @@ queue:
	/* Link @node into the waitqueue. */
	WRITE_ONCE(prev->next, node);

	arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);
	(void)arch_mcs_spin_lock_contended_label(&node->locked, release_node_err);

	/*
	 * While waiting for the MCS lock, the next pointer may have

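These two hunks are the "expression result unused" fix: the *_label() macros expand to GNU statement expressions, and parts of the icecc/clang toolchain warn when such an expression's value is silently dropped, so the callers now discard it explicitly. A tiny standalone illustration of the idiom (the macro below is invented for the demo, and whether the warning actually fires depends on the compiler):

#include <stdio.h>

/* A statement-expression macro that yields a value, like the lock helpers. */
#define read_and_report(p) ({ int __v = *(p); printf("saw %d\n", __v); __v; })

int main(void)
{
	int x = 42;

	read_and_report(&x);		/* may warn: expression result unused */
	(void)read_and_report(&x);	/* explicit discard, no warning */
	return 0;
}
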
@@ -14,7 +14,7 @@ unsigned char key[256] = {};
u16 udp_test_port = 7777;
u32 authsize, key_len;
char algo[128] = {};
char dst[16] = {};
char dst[16] = {}, dst_bad[8] = {};
int status;

static int skb_dynptr_validate(struct __sk_buff *skb, struct bpf_dynptr *psrc)

@@ -59,10 +59,9 @@ int skb_crypto_setup(void *ctx)
		.authsize = authsize,
	};
	struct bpf_crypto_ctx *cctx;
	int err = 0;
	int err;

	status = 0;

	if (key_len > 256) {
		status = -EINVAL;
		return 0;

@@ -70,8 +69,8 @@ int skb_crypto_setup(void *ctx)

	__builtin_memcpy(&params.algo, algo, sizeof(algo));
	__builtin_memcpy(&params.key, key, sizeof(key));
	cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);

	cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
	if (!cctx) {
		status = err;
		return 0;

@@ -80,7 +79,6 @@ int skb_crypto_setup(void *ctx)
	err = crypto_ctx_insert(cctx);
	if (err && err != -EEXIST)
		status = err;

	return 0;
}

@@ -92,6 +90,7 @@ int decrypt_sanity(struct __sk_buff *skb)
	struct bpf_dynptr psrc, pdst;
	int err;

	status = 0;
	err = skb_dynptr_validate(skb, &psrc);
	if (err < 0) {
		status = err;

@@ -110,13 +109,23 @@ int decrypt_sanity(struct __sk_buff *skb)
		return TC_ACT_SHOT;
	}

	/* dst is a global variable to make testing part easier to check. In real
	 * production code, a percpu map should be used to store the result.
	/* Check also bad case where the dst buffer is smaller than the
	 * skb's linear section.
	 */
	bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
	status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);
	if (!status)
		status = -EIO;
	if (status != -EINVAL)
		goto err;

	/* dst is a global variable to make testing part easier to check.
	 * In real production code, a percpu map should be used to store
	 * the result.
	 */
	bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);

	status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL);

err:
	return TC_ACT_SHOT;
}

@@ -129,7 +138,6 @@ int encrypt_sanity(struct __sk_buff *skb)
	int err;

	status = 0;

	err = skb_dynptr_validate(skb, &psrc);
	if (err < 0) {
		status = err;

@@ -148,13 +156,23 @@ int encrypt_sanity(struct __sk_buff *skb)
		return TC_ACT_SHOT;
	}

	/* dst is a global variable to make testing part easier to check. In real
	 * production code, a percpu map should be used to store the result.
	/* Check also bad case where the dst buffer is smaller than the
	 * skb's linear section.
	 */
	bpf_dynptr_from_mem(dst_bad, sizeof(dst_bad), 0, &pdst);
	status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);
	if (!status)
		status = -EIO;
	if (status != -EINVAL)
		goto err;

	/* dst is a global variable to make testing part easier to check.
	 * In real production code, a percpu map should be used to store
	 * the result.
	 */
	bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);

	status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL);

err:
	return TC_ACT_SHOT;
}

@@ -226,8 +226,7 @@ int obj_new_no_composite(void *ctx)
SEC("?tc")
int obj_new_no_struct(void *ctx)
{

	bpf_obj_new(union { int data; unsigned udata; });
	(void)bpf_obj_new(union { int data; unsigned udata; });
	return 0;
}

@@ -252,7 +251,7 @@ int new_null_ret(void *ctx)
SEC("?tc")
int obj_new_acq(void *ctx)
{
	bpf_obj_new(struct foo);
	(void)bpf_obj_new(struct foo);
	return 0;
}

@@ -30,8 +30,12 @@ __test(2) int test_strcspn(void *ctx) { return bpf_strcspn(str, "lo"); }
__test(6) int test_strstr_found(void *ctx) { return bpf_strstr(str, "world"); }
__test(-ENOENT) int test_strstr_notfound(void *ctx) { return bpf_strstr(str, "hi"); }
__test(0) int test_strstr_empty(void *ctx) { return bpf_strstr(str, ""); }
__test(0) int test_strnstr_found(void *ctx) { return bpf_strnstr(str, "hello", 6); }
__test(-ENOENT) int test_strnstr_notfound(void *ctx) { return bpf_strnstr(str, "hi", 10); }
__test(0) int test_strnstr_found1(void *ctx) { return bpf_strnstr("", "", 0); }
__test(0) int test_strnstr_found2(void *ctx) { return bpf_strnstr(str, "hello", 5); }
__test(0) int test_strnstr_found3(void *ctx) { return bpf_strnstr(str, "hello", 6); }
__test(-ENOENT) int test_strnstr_notfound1(void *ctx) { return bpf_strnstr(str, "hi", 10); }
__test(-ENOENT) int test_strnstr_notfound2(void *ctx) { return bpf_strnstr(str, "hello", 4); }
__test(-ENOENT) int test_strnstr_notfound3(void *ctx) { return bpf_strnstr("", "a", 0); }
__test(0) int test_strnstr_empty(void *ctx) { return bpf_strnstr(str, "", 1); }

char _license[] SEC("license") = "GPL";