From cba328fc5ede9091616e7296483840869b615a46 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Tue, 16 Nov 2010 15:19:51 +0000 Subject: filter: Optimize instruction revalidation code. Repeating the u16-to-u8 conversion of instruction codes in a long run of switch()/case statements is wasteful, so this patch introduces a u16-to-u8 mapping table and removes most of the case statements. As a result, the size of net/core/filter.o is reduced by about 29% on x86. Signed-off-by: Tetsuo Handa Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/filter.c | 231 +++++++++++++++++------------------------------------- 1 file changed, 72 insertions(+), 159 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index 23e9b2a..03dc071 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -383,7 +383,57 @@ EXPORT_SYMBOL(sk_run_filter); */ int sk_chk_filter(struct sock_filter *filter, int flen) { - struct sock_filter *ftest; + /* + * Valid instructions are initialized to non-0. + * Invalid instructions are initialized to 0. + */ + static const u8 codes[] = { + [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K + 1, + [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X + 1, + [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K + 1, + [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X + 1, + [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K + 1, + [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X + 1, + [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X + 1, + [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K + 1, + [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X + 1, + [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K + 1, + [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X + 1, + [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K + 1, + [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X + 1, + [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K + 1, + [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X + 1, + [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG + 1, + [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS + 1, + [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS + 1, + [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS + 1, + [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN + 1, + [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND + 1, + [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND + 1, + [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND + 1, + [BPF_LD|BPF_IMM] = BPF_S_LD_IMM + 1, + [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN + 1, + [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH + 1, + [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM + 1, + [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX + 1, + [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA + 1, + [BPF_RET|BPF_K] = BPF_S_RET_K + 1, + [BPF_RET|BPF_A] = BPF_S_RET_A + 1, + [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K + 1, + [BPF_LD|BPF_MEM] = BPF_S_LD_MEM + 1, + [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM + 1, + [BPF_ST] = BPF_S_ST + 1, + [BPF_STX] = BPF_S_STX + 1, + [BPF_JMP|BPF_JA] = BPF_S_JMP_JA + 1, + [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K + 1, + [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X + 1, + [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K + 1, + [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X + 1, + [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K + 1, + [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X + 1, + [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1, + [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1, + }; int pc; if (flen == 0 || flen > BPF_MAXINSNS) @@ -391,136 +441,31 @@ int sk_chk_filter(struct sock_filter *filter, int flen) /* check the filter code now */ for (pc = 0; pc < flen; pc++) { - ftest = &filter[pc]; - - /* Only allow valid instructions */ - switch (ftest->code) { - case BPF_ALU|BPF_ADD|BPF_K: - ftest->code = BPF_S_ALU_ADD_K; - break; - case BPF_ALU|BPF_ADD|BPF_X: -
ftest->code = BPF_S_ALU_ADD_X; - break; - case BPF_ALU|BPF_SUB|BPF_K: - ftest->code = BPF_S_ALU_SUB_K; - break; - case BPF_ALU|BPF_SUB|BPF_X: - ftest->code = BPF_S_ALU_SUB_X; - break; - case BPF_ALU|BPF_MUL|BPF_K: - ftest->code = BPF_S_ALU_MUL_K; - break; - case BPF_ALU|BPF_MUL|BPF_X: - ftest->code = BPF_S_ALU_MUL_X; - break; - case BPF_ALU|BPF_DIV|BPF_X: - ftest->code = BPF_S_ALU_DIV_X; - break; - case BPF_ALU|BPF_AND|BPF_K: - ftest->code = BPF_S_ALU_AND_K; - break; - case BPF_ALU|BPF_AND|BPF_X: - ftest->code = BPF_S_ALU_AND_X; - break; - case BPF_ALU|BPF_OR|BPF_K: - ftest->code = BPF_S_ALU_OR_K; - break; - case BPF_ALU|BPF_OR|BPF_X: - ftest->code = BPF_S_ALU_OR_X; - break; - case BPF_ALU|BPF_LSH|BPF_K: - ftest->code = BPF_S_ALU_LSH_K; - break; - case BPF_ALU|BPF_LSH|BPF_X: - ftest->code = BPF_S_ALU_LSH_X; - break; - case BPF_ALU|BPF_RSH|BPF_K: - ftest->code = BPF_S_ALU_RSH_K; - break; - case BPF_ALU|BPF_RSH|BPF_X: - ftest->code = BPF_S_ALU_RSH_X; - break; - case BPF_ALU|BPF_NEG: - ftest->code = BPF_S_ALU_NEG; - break; - case BPF_LD|BPF_W|BPF_ABS: - ftest->code = BPF_S_LD_W_ABS; - break; - case BPF_LD|BPF_H|BPF_ABS: - ftest->code = BPF_S_LD_H_ABS; - break; - case BPF_LD|BPF_B|BPF_ABS: - ftest->code = BPF_S_LD_B_ABS; - break; - case BPF_LD|BPF_W|BPF_LEN: - ftest->code = BPF_S_LD_W_LEN; - break; - case BPF_LD|BPF_W|BPF_IND: - ftest->code = BPF_S_LD_W_IND; - break; - case BPF_LD|BPF_H|BPF_IND: - ftest->code = BPF_S_LD_H_IND; - break; - case BPF_LD|BPF_B|BPF_IND: - ftest->code = BPF_S_LD_B_IND; - break; - case BPF_LD|BPF_IMM: - ftest->code = BPF_S_LD_IMM; - break; - case BPF_LDX|BPF_W|BPF_LEN: - ftest->code = BPF_S_LDX_W_LEN; - break; - case BPF_LDX|BPF_B|BPF_MSH: - ftest->code = BPF_S_LDX_B_MSH; - break; - case BPF_LDX|BPF_IMM: - ftest->code = BPF_S_LDX_IMM; - break; - case BPF_MISC|BPF_TAX: - ftest->code = BPF_S_MISC_TAX; - break; - case BPF_MISC|BPF_TXA: - ftest->code = BPF_S_MISC_TXA; - break; - case BPF_RET|BPF_K: - ftest->code = BPF_S_RET_K; - break; - case BPF_RET|BPF_A: - ftest->code = BPF_S_RET_A; - break; + struct sock_filter *ftest = &filter[pc]; + u16 code = ftest->code; + if (code >= ARRAY_SIZE(codes)) + return -EINVAL; + code = codes[code]; + /* Undo the '+ 1' in codes[] after validation. */ + if (!code--) + return -EINVAL; /* Some instructions need special checks */ - + switch (code) { + case BPF_S_ALU_DIV_K: /* check for division by zero */ - case BPF_ALU|BPF_DIV|BPF_K: if (ftest->k == 0) return -EINVAL; - ftest->code = BPF_S_ALU_DIV_K; break; - - /* check for invalid memory addresses */ - case BPF_LD|BPF_MEM: - if (ftest->k >= BPF_MEMWORDS) - return -EINVAL; - ftest->code = BPF_S_LD_MEM; - break; - case BPF_LDX|BPF_MEM: - if (ftest->k >= BPF_MEMWORDS) - return -EINVAL; - ftest->code = BPF_S_LDX_MEM; - break; - case BPF_ST: - if (ftest->k >= BPF_MEMWORDS) - return -EINVAL; - ftest->code = BPF_S_ST; - break; - case BPF_STX: + case BPF_S_LD_MEM: + case BPF_S_LDX_MEM: + case BPF_S_ST: + case BPF_S_STX: + /* check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; - ftest->code = BPF_S_STX; break; - - case BPF_JMP|BPF_JA: + case BPF_S_JMP_JA: /* * Note, the large ftest->k might cause loops. 
* Compare this with conditional jumps below, @@ -528,40 +473,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) */ if (ftest->k >= (unsigned)(flen-pc-1)) return -EINVAL; - ftest->code = BPF_S_JMP_JA; - break; - - case BPF_JMP|BPF_JEQ|BPF_K: - ftest->code = BPF_S_JMP_JEQ_K; - break; - case BPF_JMP|BPF_JEQ|BPF_X: - ftest->code = BPF_S_JMP_JEQ_X; break; - case BPF_JMP|BPF_JGE|BPF_K: - ftest->code = BPF_S_JMP_JGE_K; - break; - case BPF_JMP|BPF_JGE|BPF_X: - ftest->code = BPF_S_JMP_JGE_X; - break; - case BPF_JMP|BPF_JGT|BPF_K: - ftest->code = BPF_S_JMP_JGT_K; - break; - case BPF_JMP|BPF_JGT|BPF_X: - ftest->code = BPF_S_JMP_JGT_X; - break; - case BPF_JMP|BPF_JSET|BPF_K: - ftest->code = BPF_S_JMP_JSET_K; - break; - case BPF_JMP|BPF_JSET|BPF_X: - ftest->code = BPF_S_JMP_JSET_X; - break; - - default: - return -EINVAL; - } - - /* for conditionals both must be safe */ - switch (ftest->code) { case BPF_S_JMP_JEQ_K: case BPF_S_JMP_JEQ_X: case BPF_S_JMP_JGE_K: @@ -570,10 +482,13 @@ int sk_chk_filter(struct sock_filter *filter, int flen) case BPF_S_JMP_JGT_X: case BPF_S_JMP_JSET_X: case BPF_S_JMP_JSET_K: + /* for conditionals both must be safe */ if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; + break; } + ftest->code = code; } /* last instruction must be a RET code */ @@ -581,10 +496,8 @@ int sk_chk_filter(struct sock_filter *filter, int flen) case BPF_S_RET_K: case BPF_S_RET_A: return 0; - break; - default: - return -EINVAL; - } + } + return -EINVAL; } EXPORT_SYMBOL(sk_chk_filter); -- cgit v1.1 From 4c3710afbc333c33100739dec10662b4ee64e219 Mon Sep 17 00:00:00 2001 From: Changli Gao Date: Tue, 16 Nov 2010 20:28:24 +0000 Subject: net: move definitions of BPF_S_* to net/core/filter.c BPF_S_* are used internally, should not be exposed to the others. Signed-off-by: Changli Gao Acked-by: Eric Dumazet Acked-by: Hagen Paul Pfeifer Signed-off-by: David S. Miller --- net/core/filter.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index 03dc071..15a545d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -38,6 +38,54 @@ #include #include +enum { + BPF_S_RET_K = 0, + BPF_S_RET_A, + BPF_S_ALU_ADD_K, + BPF_S_ALU_ADD_X, + BPF_S_ALU_SUB_K, + BPF_S_ALU_SUB_X, + BPF_S_ALU_MUL_K, + BPF_S_ALU_MUL_X, + BPF_S_ALU_DIV_X, + BPF_S_ALU_AND_K, + BPF_S_ALU_AND_X, + BPF_S_ALU_OR_K, + BPF_S_ALU_OR_X, + BPF_S_ALU_LSH_K, + BPF_S_ALU_LSH_X, + BPF_S_ALU_RSH_K, + BPF_S_ALU_RSH_X, + BPF_S_ALU_NEG, + BPF_S_LD_W_ABS, + BPF_S_LD_H_ABS, + BPF_S_LD_B_ABS, + BPF_S_LD_W_LEN, + BPF_S_LD_W_IND, + BPF_S_LD_H_IND, + BPF_S_LD_B_IND, + BPF_S_LD_IMM, + BPF_S_LDX_W_LEN, + BPF_S_LDX_B_MSH, + BPF_S_LDX_IMM, + BPF_S_MISC_TAX, + BPF_S_MISC_TXA, + BPF_S_ALU_DIV_K, + BPF_S_LD_MEM, + BPF_S_LDX_MEM, + BPF_S_ST, + BPF_S_STX, + BPF_S_JMP_JA, + BPF_S_JMP_JEQ_K, + BPF_S_JMP_JEQ_X, + BPF_S_JMP_JGE_K, + BPF_S_JMP_JGE_X, + BPF_S_JMP_JGT_K, + BPF_S_JMP_JGT_X, + BPF_S_JMP_JSET_K, + BPF_S_JMP_JSET_X, +}; + /* No hurry in this branch */ static void *__load_pointer(struct sk_buff *skb, int k) { -- cgit v1.1 From 93aaae2e01e57483256b7da05c9a7ebd65ad4686 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Nov 2010 09:49:59 -0800 Subject: filter: optimize sk_run_filter Remove pc variable to avoid arithmetic to compute fentry at each filter instruction. Jumps directly manipulate fentry pointer. 
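A condensed sketch of the loop shape before and after this change (simplified from the diff below; most cases elided):

	/* Before: pc indexes filter[], and fentry is recomputed each iteration. */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *fentry = &filter[pc];

		switch (fentry->code) {
		case BPF_S_JMP_JA:
			pc += fentry->k;	/* jump = index arithmetic */
			continue;
		/* ... */
		}
	}

	/* After: fentry itself walks the array, and jumps bump the pointer. */
	for (;; fentry++) {
		switch (fentry->code) {
		case BPF_S_JMP_JA:
			fentry += fentry->k;	/* jump = pointer adjustment */
			continue;
		/* ... */
		}
	}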
As the last instruction of filter[] is guaranteed to be a RETURN, and all jumps are before the last instruction, we don't need to check filter bounds (the number of instructions in the filter array) at each iteration, so we remove it from sk_run_filter()'s parameters. On x86_32, remove the f_k var introduced in commit 57fe93b374a6b871 (filter: make sure filters dont read uninitialized memory). Note: we could use a CONFIG_ARCH_HAS_{FEW|MANY}_REGISTERS option in order to avoid too many ifdefs in this code. This helps the compiler use CPU registers to hold fentry and the A accumulator. On x86_32, this saves 401 bytes and, more importantly, sk_run_filter() runs much faster because of lower register pressure (one less conditional branch per BPF instruction). # size net/core/filter.o net/core/filter_pre.o text data bss dec hex filename 2948 0 0 2948 b84 net/core/filter.o 3349 0 0 3349 d15 net/core/filter_pre.o on x86_64: # size net/core/filter.o net/core/filter_pre.o text data bss dec hex filename 5173 0 0 5173 1435 net/core/filter.o 5224 0 0 5224 1468 net/core/filter_pre.o Signed-off-by: Eric Dumazet Acked-by: Changli Gao Signed-off-by: David S. Miller --- net/core/filter.c | 93 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 48 insertions(+), 45 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index 15a545d..9e77b3c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -137,7 +137,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb) rcu_read_lock_bh(); filter = rcu_dereference_bh(sk->sk_filter); if (filter) { - unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len); + unsigned int pkt_len = sk_run_filter(skb, filter->insns); err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; } @@ -151,14 +151,15 @@ EXPORT_SYMBOL(sk_filter); * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on * @filter: filter to apply - * @flen: length of filter * * Decode and apply filter instructions to the skb->data. - * Return length to keep, 0 for none. skb is the data we are - * filtering, filter is the array of filter instructions, and - * len is the number of filter blocks in the array. + * Return length to keep, 0 for none. @skb is the data we are + * filtering, @filter is the array of filter instructions. + * Because all jumps are guaranteed to be before last instruction, + * and last instruction guaranteed to be a RET, we dont need to check + * flen. (We used to pass to this function the length of filter) */ -unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) +unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry) { void *ptr; u32 A = 0; /* Accumulator */ @@ -167,34 +168,36 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int unsigned long memvalid = 0; u32 tmp; int k; - int pc; BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions.
*/ - for (pc = 0; pc < flen; pc++) { - const struct sock_filter *fentry = &filter[pc]; - u32 f_k = fentry->k; + for (;; fentry++) { +#if defined(CONFIG_X86_32) +#define K (fentry->k) +#else + const u32 K = fentry->k; +#endif switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: - A += f_k; + A += K; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: - A -= f_k; + A -= K; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: - A *= f_k; + A *= K; continue; case BPF_S_ALU_DIV_X: if (X == 0) @@ -202,64 +205,64 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int A /= X; continue; case BPF_S_ALU_DIV_K: - A /= f_k; + A /= K; continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: - A &= f_k; + A &= K; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: - A |= f_k; + A |= K; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: - A <<= f_k; + A <<= K; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: - A >>= f_k; + A >>= K; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: - pc += f_k; + fentry += K; continue; case BPF_S_JMP_JGT_K: - pc += (A > f_k) ? fentry->jt : fentry->jf; + fentry += (A > K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: - pc += (A >= f_k) ? fentry->jt : fentry->jf; + fentry += (A >= K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: - pc += (A == f_k) ? fentry->jt : fentry->jf; + fentry += (A == K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: - pc += (A & f_k) ? fentry->jt : fentry->jf; + fentry += (A & K) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: - pc += (A > X) ? fentry->jt : fentry->jf; + fentry += (A > X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_X: - pc += (A >= X) ? fentry->jt : fentry->jf; + fentry += (A >= X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_X: - pc += (A == X) ? fentry->jt : fentry->jf; + fentry += (A == X) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_X: - pc += (A & X) ? fentry->jt : fentry->jf; + fentry += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: - k = f_k; + k = K; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { @@ -268,7 +271,7 @@ load_w: } break; case BPF_S_LD_H_ABS: - k = f_k; + k = K; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { @@ -277,7 +280,7 @@ load_h: } break; case BPF_S_LD_B_ABS: - k = f_k; + k = K; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { @@ -292,34 +295,34 @@ load_b: X = skb->len; continue; case BPF_S_LD_W_IND: - k = X + f_k; + k = X + K; goto load_w; case BPF_S_LD_H_IND: - k = X + f_k; + k = X + K; goto load_h; case BPF_S_LD_B_IND: - k = X + f_k; + k = X + K; goto load_b; case BPF_S_LDX_B_MSH: - ptr = load_pointer(skb, f_k, 1, &tmp); + ptr = load_pointer(skb, K, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: - A = f_k; + A = K; continue; case BPF_S_LDX_IMM: - X = f_k; + X = K; continue; case BPF_S_LD_MEM: - A = (memvalid & (1UL << f_k)) ? - mem[f_k] : 0; + A = (memvalid & (1UL << K)) ? + mem[K] : 0; continue; case BPF_S_LDX_MEM: - X = (memvalid & (1UL << f_k)) ? - mem[f_k] : 0; + X = (memvalid & (1UL << K)) ? 
+ mem[K] : 0; continue; case BPF_S_MISC_TAX: X = A; @@ -328,16 +331,16 @@ load_b: A = X; continue; case BPF_S_RET_K: - return f_k; + return K; case BPF_S_RET_A: return A; case BPF_S_ST: - memvalid |= 1UL << f_k; - mem[f_k] = A; + memvalid |= 1UL << K; + mem[K] = A; continue; case BPF_S_STX: - memvalid |= 1UL << f_k; - mem[f_k] = X; + memvalid |= 1UL << K; + mem[K] = X; continue; default: WARN_ON(1); -- cgit v1.1 From 8c1592d68bc89248bfd0ee287648f41c1370d826 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 18 Nov 2010 21:56:38 +0000 Subject: filter: cleanup codes[] init Starting the translated instructions at 1 instead of 0 allows us to remove one decrement at check time and makes the codes[] array init cleaner. Signed-off-by: Eric Dumazet Acked-by: Changli Gao Signed-off-by: David S. Miller --- net/core/filter.c | 95 +++++++++++++++++++++++++++---------------------------- 1 file changed, 47 insertions(+), 48 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index 9e77b3c..c0b68f7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -39,7 +39,7 @@ #include enum { - BPF_S_RET_K = 0, + BPF_S_RET_K = 1, BPF_S_RET_A, BPF_S_ALU_ADD_K, BPF_S_ALU_ADD_X, @@ -439,51 +439,51 @@ int sk_chk_filter(struct sock_filter *filter, int flen) * Invalid instructions are initialized to 0. */ static const u8 codes[] = { - [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K + 1, - [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X + 1, - [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K + 1, - [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X + 1, - [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K + 1, - [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X + 1, - [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X + 1, - [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K + 1, - [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X + 1, - [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K + 1, - [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X + 1, - [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K + 1, - [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X + 1, - [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K + 1, - [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X + 1, - [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG + 1, - [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS + 1, - [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS + 1, - [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS + 1, - [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN + 1, - [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND + 1, - [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND + 1, - [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND + 1, - [BPF_LD|BPF_IMM] = BPF_S_LD_IMM + 1, - [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN + 1, - [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH + 1, - [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM + 1, - [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX + 1, - [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA + 1, - [BPF_RET|BPF_K] = BPF_S_RET_K + 1, - [BPF_RET|BPF_A] = BPF_S_RET_A + 1, - [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K + 1, - [BPF_LD|BPF_MEM] = BPF_S_LD_MEM + 1, - [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM + 1, - [BPF_ST] = BPF_S_ST + 1, - [BPF_STX] = BPF_S_STX + 1, - [BPF_JMP|BPF_JA] = BPF_S_JMP_JA + 1, - [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K + 1, - [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X + 1, - [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K + 1, - [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X + 1, - [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K + 1, - [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X + 1, - [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1, - [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1, + [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K, + [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X, + [BPF_ALU|BPF_SUB|BPF_K] =
BPF_S_ALU_SUB_K, + [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X, + [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K, + [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X, + [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X, + [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K, + [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X, + [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K, + [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X, + [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K, + [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X, + [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K, + [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X, + [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG, + [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS, + [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS, + [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS, + [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN, + [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND, + [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND, + [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND, + [BPF_LD|BPF_IMM] = BPF_S_LD_IMM, + [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN, + [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH, + [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM, + [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX, + [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA, + [BPF_RET|BPF_K] = BPF_S_RET_K, + [BPF_RET|BPF_A] = BPF_S_RET_A, + [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K, + [BPF_LD|BPF_MEM] = BPF_S_LD_MEM, + [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM, + [BPF_ST] = BPF_S_ST, + [BPF_STX] = BPF_S_STX, + [BPF_JMP|BPF_JA] = BPF_S_JMP_JA, + [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K, + [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X, + [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K, + [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X, + [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K, + [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X, + [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K, + [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X, }; int pc; @@ -498,8 +498,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) if (code >= ARRAY_SIZE(codes)) return -EINVAL; code = codes[code]; - /* Undo the '+ 1' in codes[] after validation. */ - if (!code--) + if (!code) return -EINVAL; /* Some instructions need special checks */ switch (code) { -- cgit v1.1 From c26aed40f4fd18f86bcc6aba557cab700b129b73 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 18 Nov 2010 22:04:46 +0000 Subject: filter: use reciprocal divide At compile time, we can replace the DIV_K instruction (divide by a constant value) by a reciprocal divide. At exec time, the expensive divide is replaced by a multiply, a less expensive operation on most processors. Signed-off-by: Eric Dumazet Acked-by: Changli Gao Signed-off-by: David S. 
Miller --- net/core/filter.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index c0b68f7..23e0a2a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -37,6 +37,7 @@ #include #include #include +#include <linux/reciprocal_div.h> enum { BPF_S_RET_K = 1, @@ -205,7 +206,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry A /= X; continue; case BPF_S_ALU_DIV_K: - A /= K; + A = reciprocal_divide(A, K); continue; case BPF_S_ALU_AND_X: A &= X; @@ -506,6 +507,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) /* check for division by zero */ if (ftest->k == 0) return -EINVAL; + ftest->k = reciprocal_value(ftest->k); break; case BPF_S_LD_MEM: case BPF_S_LDX_MEM: -- cgit v1.1 From da2033c282264bfba4e339b7cb3df62adb5c5fc8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 30 Nov 2010 21:45:56 +0000 Subject: filter: add SKF_AD_RXHASH and SKF_AD_CPU Add SKF_AD_RXHASH and SKF_AD_CPU to the filter ancillary mechanism, to be able to build advanced filters. This can help spread packets across several sockets with a fast selection, for example after RPS dispatch to N cpus, or catch a percentage of flows in one queue. tcpdump -s 500 "cpu = 1" : [0] ld CPU [1] jeq #1 jt 2 jf 3 [2] ret #500 [3] ret #0 # take 12.5 % of flows (average) tcpdump -s 1000 "rxhash & 7 = 2" : [0] ld RXHASH [1] and #7 [2] jeq #2 jt 3 jf 4 [3] ret #1000 [4] ret #0 Signed-off-by: Eric Dumazet Cc: Rui Acked-by: Changli Gao Signed-off-by: David S. Miller --- net/core/filter.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index a44d27f..054e286 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -375,6 +375,12 @@ load_b: return 0; A = skb->dev->type; continue; + case SKF_AD_RXHASH: + A = skb->rxhash; + continue; + case SKF_AD_CPU: + A = raw_smp_processor_id(); + continue; case SKF_AD_NLATTR: { struct nlattr *nla; -- cgit v1.1 From 2d5311e4e8272fd398fc1cf278f12fd6dee4074b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 1 Dec 2010 20:46:24 +0000 Subject: filter: add a security check at install time We added some security checks in commit 57fe93b374a6 (filter: make sure filters dont read uninitialized memory) to close a potential leak of kernel information to userspace. This added a potential extra cost at run time, while we can instead check the filter itself at install time to make sure a malicious user doesn't try to abuse us. This patch adds a check_load_and_stores() function, whose sole purpose is to make this check, allocating a temporary array of masks. We scan the filter and propagate bitmask information, telling us whether a load M(K) is allowed because a previous store M(K) is guaranteed. (So that sk_run_filter() cannot read uninitialized memory.) Note: this can uncover an application bug, denying a filter attach that was previously allowed. Signed-off-by: Eric Dumazet Cc: Dan Rosenberg Cc: Changli Gao Acked-by: Changli Gao Signed-off-by: David S.
Miller --- net/core/filter.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 63 insertions(+), 9 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index 054e286..ac4920a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -166,11 +166,9 @@ unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ - unsigned long memvalid = 0; u32 tmp; int k; - BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions. */ @@ -318,12 +316,10 @@ load_b: X = K; continue; case BPF_S_LD_MEM: - A = (memvalid & (1UL << K)) ? - mem[K] : 0; + A = mem[K]; continue; case BPF_S_LDX_MEM: - X = (memvalid & (1UL << K)) ? - mem[K] : 0; + X = mem[K]; continue; case BPF_S_MISC_TAX: X = A; @@ -336,11 +332,9 @@ load_b: case BPF_S_RET_A: return A; case BPF_S_ST: - memvalid |= 1UL << K; mem[K] = A; continue; case BPF_S_STX: - memvalid |= 1UL << K; mem[K] = X; continue; default: @@ -425,6 +419,66 @@ load_b: } EXPORT_SYMBOL(sk_run_filter); +/* + * Security : + * A BPF program is able to use 16 cells of memory to store intermediate + * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()) + * As we dont want to clear mem[] array for each packet going through + * sk_run_filter(), we check that filter loaded by user never try to read + * a cell if not previously written, and we check all branches to be sure + * a malicious user doesnt try to abuse us. + */ +static int check_load_and_stores(struct sock_filter *filter, int flen) +{ + u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ + int pc, ret = 0; + + BUILD_BUG_ON(BPF_MEMWORDS > 16); + masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); + if (!masks) + return -ENOMEM; + memset(masks, 0xff, flen * sizeof(*masks)); + + for (pc = 0; pc < flen; pc++) { + memvalid &= masks[pc]; + + switch (filter[pc].code) { + case BPF_S_ST: + case BPF_S_STX: + memvalid |= (1 << filter[pc].k); + break; + case BPF_S_LD_MEM: + case BPF_S_LDX_MEM: + if (!(memvalid & (1 << filter[pc].k))) { + ret = -EINVAL; + goto error; + } + break; + case BPF_S_JMP_JA: + /* a jump must set masks on target */ + masks[pc + 1 + filter[pc].k] &= memvalid; + memvalid = ~0; + break; + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JEQ_X: + case BPF_S_JMP_JGE_K: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JSET_X: + case BPF_S_JMP_JSET_K: + /* a jump must set masks on targets */ + masks[pc + 1 + filter[pc].jt] &= memvalid; + masks[pc + 1 + filter[pc].jf] &= memvalid; + memvalid = ~0; + break; + } + } +error: + kfree(masks); + return ret; +} + /** * sk_chk_filter - verify socket filter code * @filter: filter to verify @@ -553,7 +607,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen) switch (filter[flen - 1].code) { case BPF_S_RET_K: case BPF_S_RET_A: - return 0; + return check_load_and_stores(filter, flen); } return -EINVAL; } -- cgit v1.1 From 62ab0812137ec4f9884dd7de346238841ac03283 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 6 Dec 2010 20:50:09 +0000 Subject: filter: constify sk_run_filter() sk_run_filter() doesn't write to the skb; change its prototype to reflect this. Fix two af_packet comments. Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/core/filter.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index ac4920a..25500f1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -88,7 +88,7 @@ enum { }; /* No hurry in this branch */ -static void *__load_pointer(struct sk_buff *skb, int k) +static void *__load_pointer(const struct sk_buff *skb, int k) { u8 *ptr = NULL; @@ -102,7 +102,7 @@ static void *__load_pointer(struct sk_buff *skb, int k) return NULL; } -static inline void *load_pointer(struct sk_buff *skb, int k, +static inline void *load_pointer(const struct sk_buff *skb, int k, unsigned int size, void *buffer) { if (k >= 0) @@ -160,7 +160,8 @@ EXPORT_SYMBOL(sk_filter); * and last instruction guaranteed to be a RET, we dont need to check * flen. (We used to pass to this function the length of filter) */ -unsigned int sk_run_filter(struct sk_buff *skb, const struct sock_filter *fentry) +unsigned int sk_run_filter(const struct sk_buff *skb, + const struct sock_filter *fentry) { void *ptr; u32 A = 0; /* Accumulator */ -- cgit v1.1 From 4bc65dd8d88671712d71592a83374cfb0b5fce7a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 7 Dec 2010 22:26:15 +0000 Subject: filter: use size of fetched data in __load_pointer() __load_pointer() checks that the data we fetch from the skb is included in the head portion, but assumes we fetch one byte instead of up to four. This won't crash because we have extra bytes (struct skb_shared_info) after head, but it can read uninitialized bytes. Fix this by using the size of the data (1, 2 or 4 bytes) in the test. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/filter.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index e193e29..e8a6ac4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -88,7 +88,7 @@ enum { }; /* No hurry in this branch */ -static void *__load_pointer(const struct sk_buff *skb, int k) +static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) { u8 *ptr = NULL; @@ -97,7 +97,7 @@ static void *__load_pointer(const struct sk_buff *skb, int k) else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - if (ptr >= skb->head && ptr < skb_tail_pointer(skb)) + if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; return NULL; } @@ -110,7 +110,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, else { if (k >= SKF_AD_OFF) return NULL; - return __load_pointer(skb, k); + return __load_pointer(skb, k, size); } } -- cgit v1.1 From 12b16dadbc2406144d408754f96d0f44aa016239 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 15 Dec 2010 19:45:28 +0000 Subject: filter: optimize accesses to ancillary data We can translate pseudo load instructions at filter check time to dedicated instructions to speed up filtering and avoid one switch(). libpcap currently uses SKF_AD_PROTOCOL, but custom filters probably use other ancillary accesses. Note: I made the assumption that ancillary data is always accessed with BPF_LD|BPF_?|BPF_ABS instructions, not with BPF_LD|BPF_?|BPF_IND ones (offset given by the K constant, not by K + X register). On x86_64, this saves a few bytes of text: # size net/core/filter.o.* text data bss dec hex filename 4864 0 0 4864 1300 net/core/filter.o.new 4944 0 0 4944 1350 net/core/filter.o.old Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/core/filter.c | 72 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 28 deletions(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index e8a6ac4..2b27d4e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -85,6 +85,17 @@ enum { BPF_S_JMP_JGT_X, BPF_S_JMP_JSET_K, BPF_S_JMP_JSET_X, + /* Ancillary data */ + BPF_S_ANC_PROTOCOL, + BPF_S_ANC_PKTTYPE, + BPF_S_ANC_IFINDEX, + BPF_S_ANC_NLATTR, + BPF_S_ANC_NLATTR_NEST, + BPF_S_ANC_MARK, + BPF_S_ANC_QUEUE, + BPF_S_ANC_HATYPE, + BPF_S_ANC_RXHASH, + BPF_S_ANC_CPU, }; /* No hurry in this branch */ @@ -107,11 +118,7 @@ static inline void *load_pointer(const struct sk_buff *skb, int k, { if (k >= 0) return skb_header_pointer(skb, k, size, buffer); - else { - if (k >= SKF_AD_OFF) - return NULL; - return __load_pointer(skb, k, size); - } + return __load_pointer(skb, k, size); } /** @@ -269,7 +276,7 @@ load_w: A = get_unaligned_be32(ptr); continue; } - break; + return 0; case BPF_S_LD_H_ABS: k = K; load_h: @@ -278,7 +285,7 @@ load_h: A = get_unaligned_be16(ptr); continue; } - break; + return 0; case BPF_S_LD_B_ABS: k = K; load_b: @@ -287,7 +294,7 @@ load_b: A = *(u8 *)ptr; continue; } - break; + return 0; case BPF_S_LD_W_LEN: A = skb->len; continue; @@ -338,45 +345,35 @@ load_b: case BPF_S_STX: mem[K] = X; continue; - default: - WARN_ON(1); - return 0; - } - - /* - * Handle ancillary data, which are impossible - * (or very difficult) to get parsing packet contents. - */ - switch (k-SKF_AD_OFF) { - case SKF_AD_PROTOCOL: + case BPF_S_ANC_PROTOCOL: A = ntohs(skb->protocol); continue; - case SKF_AD_PKTTYPE: + case BPF_S_ANC_PKTTYPE: A = skb->pkt_type; continue; - case SKF_AD_IFINDEX: + case BPF_S_ANC_IFINDEX: if (!skb->dev) return 0; A = skb->dev->ifindex; continue; - case SKF_AD_MARK: + case BPF_S_ANC_MARK: A = skb->mark; continue; - case SKF_AD_QUEUE: + case BPF_S_ANC_QUEUE: A = skb->queue_mapping; continue; - case SKF_AD_HATYPE: + case BPF_S_ANC_HATYPE: if (!skb->dev) return 0; A = skb->dev->type; continue; - case SKF_AD_RXHASH: + case BPF_S_ANC_RXHASH: A = skb->rxhash; continue; - case SKF_AD_CPU: + case BPF_S_ANC_CPU: A = raw_smp_processor_id(); continue; - case SKF_AD_NLATTR: { + case BPF_S_ANC_NLATTR: { struct nlattr *nla; if (skb_is_nonlinear(skb)) @@ -392,7 +389,7 @@ load_b: A = 0; continue; } - case SKF_AD_NLATTR_NEST: { + case BPF_S_ANC_NLATTR_NEST: { struct nlattr *nla; if (skb_is_nonlinear(skb)) @@ -412,6 +409,7 @@ load_b: continue; } default: + WARN_ON(1); return 0; } } @@ -600,6 +598,24 @@ int sk_chk_filter(struct sock_filter *filter, int flen) pc + ftest->jf + 1 >= flen) return -EINVAL; break; + case BPF_S_LD_W_ABS: + case BPF_S_LD_H_ABS: + case BPF_S_LD_B_ABS: +#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ + code = BPF_S_ANC_##CODE; \ + break + switch (ftest->k) { + ANCILLARY(PROTOCOL); + ANCILLARY(PKTTYPE); + ANCILLARY(IFINDEX); + ANCILLARY(NLATTR); + ANCILLARY(NLATTR_NEST); + ANCILLARY(MARK); + ANCILLARY(QUEUE); + ANCILLARY(HATYPE); + ANCILLARY(RXHASH); + ANCILLARY(CPU); + } } ftest->code = code; } -- cgit v1.1 From 697d0e338c7fd392cf73bf120150ab6e5516a3a3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 8 Jan 2011 17:41:42 +0000 Subject: net: fix kernel-doc warning in core/filter.c Fix new kernel-doc notation warning in net/core/filter.c: Warning(net/core/filter.c:172): No description found for parameter 'fentry' Warning(net/core/filter.c:172): Excess function parameter 'filter' description in 'sk_run_filter' Signed-off-by: 
Randy Dunlap Signed-off-by: David S. Miller --- net/core/filter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/filter.c') diff --git a/net/core/filter.c b/net/core/filter.c index 2b27d4e..afc5837 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter); /** * sk_run_filter - run a filter on a socket * @skb: buffer to run the filter on - * @filter: filter to apply + * @fentry: filter to apply * * Decode and apply filter instructions to the skb->data. * Return length to keep, 0 for none. @skb is the data we are -- cgit v1.1
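For context, classic BPF programs of the kind sk_chk_filter() validates and sk_run_filter() executes are typically installed from userspace with setsockopt(SO_ATTACH_FILTER). A minimal illustrative sketch, not taken from the patches above (assumes an Ethernet packet socket and CAP_NET_RAW):

	#include <arpa/inet.h>
	#include <linux/filter.h>
	#include <linux/if_ether.h>
	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		/* Accept IPv4 frames, drop everything else. Fields: { code, jt, jf, k }. */
		struct sock_filter prog[] = {
			{ BPF_LD  | BPF_H   | BPF_ABS, 0, 0, 12       }, /* A = ethertype */
			{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, ETH_P_IP }, /* A == 0x0800? */
			{ BPF_RET | BPF_K,             0, 0, 0xffff   }, /* accept */
			{ BPF_RET | BPF_K,             0, 0, 0        }, /* drop */
		};
		struct sock_fprog fprog = {
			.len    = sizeof(prog) / sizeof(prog[0]),
			.filter = prog,
		};
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

		if (fd < 0) {
			perror("socket");
			return 1;
		}
		/* The kernel runs sk_chk_filter() on the program at attach time. */
		if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			       &fprog, sizeof(fprog)) < 0) {
			perror("SO_ATTACH_FILTER");
			return 1;
		}
		return 0;
	}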