kernel: improve ipv4 netfilter optimization patch

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
SVN-Revision: 42045

parent ad876993d3
commit 3e64341eac

2 changed files with 62 additions and 49 deletions
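Both changed files carry the same OpenWrt patch against ipt_do_table() in net/ipv4/netfilter/ip_tables.c. That patch adds a bypass: when the hook's first entry is a "default" rule whose verdict is already decided, the entry's packet/byte counters are bumped and the verdict is returned without walking the table. Before this commit the check still ran after the IP header and device-name lookups and after the jumpstack/stack-pointer/xt_write_recseq setup; the commit moves it to the top of the function, directly after the per-CPU table_base is fetched, so the fast path touches as little per-packet state as possible. Note that the hunks below are diffs of patch files: the first +/- column is this commit's change to the patches, the second column is each patch's own change to ip_tables.c.

ipt_handle_default_rule() itself is defined elsewhere in the patch and does not appear in this diff. A minimal sketch of what such a check has to establish (the function name, the memcmp shortcut and the exact tests are illustrative assumptions, not the patch's code):

#include <linux/string.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

/* Sketch only: report whether a chain's first entry decides the verdict on
 * its own, i.e. no built-in IP header tests, no extended match entries, and
 * a standard target carrying an absolute verdict (ACCEPT/DROP) rather than
 * a jump or RETURN.  The real helper in the patch is organised differently. */
static bool default_rule_sketch(const struct ipt_entry *e,
                                unsigned int *verdict)
{
        static const struct ipt_ip no_header_tests; /* all zero: matches anything */
        const struct xt_entry_target *t;
        int v;

        if (memcmp(&e->ip, &no_header_tests, sizeof(no_header_tests)) != 0)
                return false;                   /* addresses/ifaces/proto are set */

        if (e->target_offset != sizeof(struct ipt_entry))
                return false;                   /* extended matches present */

        t = (const void *)e + e->target_offset; /* as ipt_get_target() does */
        if (t->u.kernel.target->target)
                return false;                   /* not the builtin standard target */

        v = ((const struct xt_standard_target *)t)->verdict;
        if (v >= 0 || v == XT_RETURN)
                return false;                   /* jump to user chain, or RETURN */

        *verdict = (unsigned int)(-v) - 1;      /* e.g. NF_ACCEPT, NF_DROP */
        return true;
}

With that in place, an empty built-in chain that falls through to its policy, or a chain whose first rule is an unconditional ACCEPT or DROP, is decided in a handful of loads.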
First changed file:

@@ -34,33 +34,35 @@
  /* Returns one of the generic firewall policies, like NF_ACCEPT. */
  unsigned int
  ipt_do_table(struct sk_buff *skb,
-@@ -334,6 +361,25 @@ ipt_do_table(struct sk_buff *skb,
- 	ip = ip_hdr(skb);
- 	indev = in ? in->name : nulldevname;
- 	outdev = out ? out->name : nulldevname;
-+
+@@ -331,9 +358,27 @@ ipt_do_table(struct sk_buff *skb,
+ 	unsigned int addend;
+ 
+ 	/* Initialization */
 +	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 +	local_bh_disable();
-+	addend = xt_write_recseq_begin();
 +	private = table->private;
 +	cpu        = smp_processor_id();
 +	table_base = private->entries[cpu];
-+	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
-+	origptr    = *stackptr;
-+
 +	e = get_entry(table_base, private->hook_entry[hook]);
 +	if (ipt_handle_default_rule(e, &verdict)) {
 +		ADD_COUNTER(e->counters, skb->len, 1);
-+		xt_write_recseq_end(addend);
 +		local_bh_enable();
 +		return verdict;
 +	}
++
+ 	ip = ip_hdr(skb);
+ 	indev = in ? in->name : nulldevname;
+ 	outdev = out ? out->name : nulldevname;
++
++	addend = xt_write_recseq_begin();
++	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
++	stackptr   = per_cpu_ptr(private->stackptr, cpu);
++	origptr    = *stackptr;
 +
  	/* We handle fragments by dealing with the first fragment as
  	 * if it was a normal packet.  All other fragments are treated
  	 * normally, except that they will NEVER match rules that ask
-@@ -348,18 +394,6 @@ ipt_do_table(struct sk_buff *skb,
+@@ -348,18 +393,6 @@ ipt_do_table(struct sk_buff *skb,
  	acpar.family  = NFPROTO_IPV4;
  	acpar.hooknum = hook;
  
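In the reworked hunk the xt_write_recseq_begin()/xt_write_recseq_end() bracket and the jumpstack/stackptr bookkeeping are taken out of the bypass: the sequence counter is started only once the default-rule check has declined the packet and the table actually has to be walked, while the fast path runs under local_bh_disable() alone. For background, this is the bracket x_tables normally puts around per-CPU counter updates; a sketch of the usual pattern, not code from this commit (the helper name is made up; the calls themselves are the kernel's own):

#include <linux/netfilter/x_tables.h>
#include <linux/skbuff.h>

/* Background sketch: bump a rule's per-CPU packet/byte counters inside the
 * seqcount bracket, so readers of the counters can detect an in-flight
 * writer.  The diff above enters this bracket only on the slow path. */
static void count_hit_sketch(struct xt_counters *counters,
                             const struct sk_buff *skb)
{
        unsigned int addend;

        local_bh_disable();
        addend = xt_write_recseq_begin();
        ADD_COUNTER(*counters, skb->len, 1);    /* bcnt += skb->len, pcnt += 1 */
        xt_write_recseq_end(addend);
        local_bh_enable();
}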
Second changed file:

@@ -34,50 +34,61 @@
  /* Returns one of the generic firewall policies, like NF_ACCEPT. */
  unsigned int
  ipt_do_table(struct sk_buff *skb,
-@@ -334,19 +361,6 @@ ipt_do_table(struct sk_buff *skb,
- 	ip = ip_hdr(skb);
- 	indev = in ? in->name : nulldevname;
- 	outdev = out ? out->name : nulldevname;
--	/* We handle fragments by dealing with the first fragment as
--	 * if it was a normal packet.  All other fragments are treated
--	 * normally, except that they will NEVER match rules that ask
--	 * things we don't know, ie. tcp syn flag or ports).  If the
--	 * rule is also a fragment-specific rule, non-fragments won't
--	 * match it. */
--	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
--	acpar.thoff   = ip_hdrlen(skb);
--	acpar.hotdrop = false;
--	acpar.in      = in;
--	acpar.out     = out;
--	acpar.family  = NFPROTO_IPV4;
--	acpar.hooknum = hook;
+@@ -331,9 +358,33 @@ ipt_do_table(struct sk_buff *skb,
+ 	unsigned int addend;
  
- 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
- 	local_bh_disable();
-@@ -364,6 +378,26 @@ ipt_do_table(struct sk_buff *skb,
- 	origptr = *stackptr;
- 
- 	e = get_entry(table_base, private->hook_entry[hook]);
+ 	/* Initialization */
++	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
++	local_bh_disable();
++	private = table->private;
++	cpu        = smp_processor_id();
++	/*
++	 * Ensure we load private-> members after we've fetched the base
++	 * pointer.
++	 */
++	smp_read_barrier_depends();
++	table_base = private->entries[cpu];
++
++	e = get_entry(table_base, private->hook_entry[hook]);
 +	if (ipt_handle_default_rule(e, &verdict)) {
 +		ADD_COUNTER(e->counters, skb->len, 1);
-+		xt_write_recseq_end(addend);
 +		local_bh_enable();
 +		return verdict;
 +	}
 +
-+	/* We handle fragments by dealing with the first fragment as
-+	 * if it was a normal packet.  All other fragments are treated
-+	 * normally, except that they will NEVER match rules that ask
-+	 * things we don't know, ie. tcp syn flag or ports).  If the
-+	 * rule is also a fragment-specific rule, non-fragments won't
-+	 * match it. */
-+	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
-+	acpar.thoff   = ip_hdrlen(skb);
-+	acpar.hotdrop = false;
-+	acpar.in      = in;
-+	acpar.out     = out;
-+	acpar.family  = NFPROTO_IPV4;
-+	acpar.hooknum = hook;
+ 	ip = ip_hdr(skb);
+ 	indev = in ? in->name : nulldevname;
+ 	outdev = out ? out->name : nulldevname;
++
++	addend = xt_write_recseq_begin();
++	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
++	stackptr   = per_cpu_ptr(private->stackptr, cpu);
++	origptr    = *stackptr;
++
+ 	/* We handle fragments by dealing with the first fragment as
+ 	 * if it was a normal packet.  All other fragments are treated
+ 	 * normally, except that they will NEVER match rules that ask
+@@ -348,23 +399,6 @@ ipt_do_table(struct sk_buff *skb,
+ 	acpar.family  = NFPROTO_IPV4;
+ 	acpar.hooknum = hook;
  
+-	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+-	local_bh_disable();
+-	addend = xt_write_recseq_begin();
+-	private = table->private;
+-	cpu        = smp_processor_id();
+-	/*
+-	 * Ensure we load private-> members after we've fetched the base
+-	 * pointer.
+-	 */
+-	smp_read_barrier_depends();
+-	table_base = private->entries[cpu];
+-	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
+-	stackptr   = per_cpu_ptr(private->stackptr, cpu);
+-	origptr    = *stackptr;
+-
+-	e = get_entry(table_base, private->hook_entry[hook]);
+-
  	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
  		 table->name, hook, origptr,
+ 		 get_entry(table_base, private->underflow[hook]));
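The second copy differs from the first mainly in that it targets a kernel whose ipt_do_table() carries an smp_read_barrier_depends() between fetching table->private and dereferencing its members; the reordered bypass block keeps the barrier and its comment, while the block removed in the first copy has no such barrier to preserve. A sketch of the pattern being kept, not code from this commit (the helper name is made up; it assumes only that table->private may be replaced concurrently by a ruleset update):

#include <linux/netfilter/x_tables.h>
#include <linux/smp.h>
#include <asm/barrier.h>

/* Background sketch: read the per-CPU entries pointer only after the freshly
 * loaded private pointer is visible, so a concurrently committed ruleset
 * cannot be mixed with stale member loads on CPUs that reorder dependent
 * loads.  Caller is expected to run with bottom halves disabled, as
 * ipt_do_table() does. */
static void *pick_cpu_entries_sketch(const struct xt_table *table)
{
        const struct xt_table_info *private = table->private;

        smp_read_barrier_depends();
        return private->entries[smp_processor_id()];
}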