Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c                          |  19
-rw-r--r--  net/core/ethtool.c                      |   4
-rw-r--r--  net/core/netpoll.c                      |  13
-rw-r--r--  net/core/skbuff.c                       |  49
-rw-r--r--  net/ipv4/ip_forward.c                   |   3
-rw-r--r--  net/ipv4/ip_input.c                     |   3
-rw-r--r--  net/ipv4/ip_options.c                   |   1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c          |  91
-rw-r--r--  net/ipv4/route.c                        |  16
-rw-r--r--  net/ipv4/tcp.c                          |   4
-rw-r--r--  net/ipv6/ip6_output.c                   |   1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c         |  87
-rw-r--r--  net/ipv6/route.c                        |  24
-rw-r--r--  net/key/af_key.c                        |  19
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c  |  42
-rw-r--r--  net/sched/sch_drr.c                     |  91
-rw-r--r--  net/sched/sch_generic.c                 |  17
-rw-r--r--  net/sched/sch_prio.c                    |  60
-rw-r--r--  net/sched/sch_tbf.c                     |  75
-rw-r--r--  net/tipc/netlink.c                      |   3
-rw-r--r--  net/xfrm/xfrm_input.c                   |  12
-rw-r--r--  net/xfrm/xfrm_output.c                  |  11
-rw-r--r--  net/xfrm/xfrm_policy.c                  |  38
-rw-r--r--  net/xfrm/xfrm_state.c                   |  94
-rw-r--r--  net/xfrm/xfrm_user.c                    |   6
25 files changed, 772 insertions, 11 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index ed96122..647ec24 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2556,6 +2556,17 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
!(features & NETIF_F_SG)));
}
+#ifdef CONFIG_ASF_EGRESS_QOS
+/* Linux QoS hook to transfer all packets to ASF QoS */
+static asf_qos_fn_hook *asf_qos_fn;
+
+void asf_qos_fn_register(asf_qos_fn_hook *fn)
+{
+ asf_qos_fn = fn;
+}
+EXPORT_SYMBOL(asf_qos_fn_register);
+#endif
+
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
@@ -2837,12 +2848,20 @@ int dev_queue_xmit(struct sk_buff *skb)
skb_update_prio(skb);
+#ifdef CONFIG_ASF_EGRESS_QOS
+ if (asf_qos_fn) {
+ rc = asf_qos_fn(skb);
+ if (!rc)
+ goto out;
+ }
+#endif
txq = netdev_pick_tx(dev, skb);
q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
+
trace_net_dev_queue(skb);
if (q->enqueue) {
rc = __dev_xmit_skb(skb, q, dev, txq);
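The hook contract implied by the dev_queue_xmit() hunk above: a registered function that returns 0 has consumed the skb, and any non-zero return falls through to the normal qdisc path. A minimal consumer sketch, assuming the asf_qos_fn_hook typedef and registration prototype come from an ASF header (the typedef shape shown here is an assumption):

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Assumed shape of the hook type; the real typedef ships in an ASF header. */
    typedef int asf_qos_fn_hook(struct sk_buff *skb);
    extern void asf_qos_fn_register(asf_qos_fn_hook *fn);

    static int my_asf_qos_enqueue(struct sk_buff *skb)
    {
        /* Return 0 once the accelerator owns the skb; any other value
         * lets dev_queue_xmit() continue down the software qdisc path. */
        return 1;
    }

    static int __init my_qos_init(void)
    {
        asf_qos_fn_register(my_asf_qos_enqueue);
        return 0;
    }

    static void __exit my_qos_exit(void)
    {
        asf_qos_fn_register(NULL);  /* detach before unload */
    }

    module_init(my_qos_init);
    module_exit(my_qos_exit);
    MODULE_LICENSE("GPL");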
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 78e9d92..1342923 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -94,6 +94,10 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
[NETIF_F_LOOPBACK_BIT] = "loopback",
[NETIF_F_RXFCS_BIT] = "rx-fcs",
[NETIF_F_RXALL_BIT] = "rx-all",
+
+ /* Freescale DPA support */
+ [NETIF_F_HW_QDISC_BIT] = "hw-qdisc",
+ [NETIF_F_HW_ACCEL_MQ_BIT] = "hw-accel-mq",
};
static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9d42f3b..e9e79df 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -490,13 +490,18 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
skb_reset_mac_header(skb);
skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
} else {
- udph->check = 0;
- udph->check = csum_tcpudp_magic(np->local_ip.ip,
+ /* Only query the IPv4 checksumming capabilities */
+ if (np->dev->features & NETIF_F_IP_CSUM)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ else {
+ skb->ip_summed = CHECKSUM_NONE;
+ udph->check = csum_tcpudp_magic(np->local_ip.ip,
np->remote_ip.ip,
udp_len, IPPROTO_UDP,
csum_partial(udph, udp_len, 0));
- if (udph->check == 0)
- udph->check = CSUM_MANGLED_0;
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
skb_push(skb, sizeof(*iph));
skb_reset_network_header(skb);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8ca51e8..86d6ffa 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -663,6 +663,55 @@ void consume_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(consume_skb);
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+ u8 head_frag = skb->head_frag;
+
+ skb_release_head_state(skb);
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb->head_frag = head_frag;
+ skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
+ * skb_recycle_check - check if skb can be reused for receive
+ * @skb: buffer
+ * @skb_size: minimum receive buffer size
+ *
+ * Checks that the skb passed in is not shared or cloned, and
+ * that it is linear and that its head portion is at least as
+ * large as skb_size, so that it can be recycled as a receive buffer.
+ * If these conditions are met, this function does any necessary
+ * reference count dropping and cleans up the skbuff as if it
+ * just came from __alloc_skb().
+ */
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+ if (!skb_is_recycleable(skb, skb_size))
+ return false;
+
+ skb_recycle(skb);
+
+ return true;
+}
+EXPORT_SYMBOL(skb_recycle_check);
+
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
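A sketch of the intended caller of skb_recycle_check(): a driver TX-completion path that returns a just-freed skb to its RX ring instead of releasing it. The my_priv, rx_buf_size and my_rx_refill names are hypothetical; skb_is_recycleable() is assumed to be provided by skbuff.h in this tree:

    static void my_tx_complete(struct my_priv *priv, struct sk_buff *skb)
    {
        /* Reuse the buffer for RX if it is unshared, linear and large
         * enough; otherwise fall back to a normal free. */
        if (skb_recycle_check(skb, priv->rx_buf_size))
            my_rx_refill(priv, skb);
        else
            dev_kfree_skb_any(skb);
    }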
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index bd1c5ba..29e55b6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -198,3 +198,6 @@ drop:
kfree_skb(skb);
return NET_RX_DROP;
}
+#ifdef CONFIG_AS_FASTPATH
+EXPORT_SYMBOL(ip_forward);
+#endif
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 3d4da2c..c12f79b 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -257,7 +257,7 @@ int ip_local_deliver(struct sk_buff *skb)
ip_local_deliver_finish);
}
-static inline bool ip_rcv_options(struct sk_buff *skb)
+int ip_rcv_options(struct sk_buff *skb)
{
struct ip_options *opt;
const struct iphdr *iph;
@@ -305,6 +305,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
drop:
return true;
}
+EXPORT_SYMBOL(ip_rcv_options);
int sysctl_ip_early_demux __read_mostly = 1;
EXPORT_SYMBOL(sysctl_ip_early_demux);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 089ed81..c7da06e 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -605,6 +605,7 @@ void ip_forward_options(struct sk_buff *skb)
ip_send_check(ip_hdr(skb));
}
}
+EXPORT_SYMBOL(ip_forward_options);
int ip_options_rcv_srr(struct sk_buff *skb)
{
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 651c107..2e86cbc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -63,6 +63,19 @@ MODULE_DESCRIPTION("IPv4 packet filter");
#define inline
#endif
+#ifdef CONFIG_ASF_INGRESS_MARKER
+marker_add_hook *marker_add_fn;
+marker_flush_hook *marker_flush_fn;
+
+void marker_v4_hook_fn_register(marker_add_hook *add,
+ marker_flush_hook *flush)
+{
+ marker_add_fn = add;
+ marker_flush_fn = flush;
+}
+EXPORT_SYMBOL(marker_v4_hook_fn_register);
+#endif
+
void *ipt_alloc_initial_table(const struct xt_table *info)
{
return xt_alloc_initial_table(ipt, IPT);
@@ -818,6 +831,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
++newinfo->stacksize;
}
+
if (i != repl->num_entries) {
duprintf("translate_table: %u not %u entries\n",
i, repl->num_entries);
@@ -868,6 +882,65 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
memcpy(newinfo->entries[i], entry0, newinfo->size);
}
+#ifdef CONFIG_ASF_INGRESS_MARKER
+ /* Rules have been verified; now safe to offload to ASF */
+ if (marker_add_fn && (0 == strcmp(repl->name, "mangle"))) {
+ struct xt_entry_match *m;
+ struct xt_entry_target *t;
+ markerRule_t rules[MAX_MARKER_RULES] = {};
+ uint16_t *sport, *dport;
+ uint32_t num = 0;
+
+ /* Is this a FLUSH request? */
+ /* Note: num_entries always equals num_counters + 1 when adding rules,
+ while num_entries comes as the default value of '6' when a FLUSH is required */
+ if ((repl->num_entries == 6) && (repl->num_entries < repl->num_counters)) {
+ if (marker_flush_fn)
+ marker_flush_fn();
+ return ret;
+ }
+ xt_entry_foreach(iter, entry0, newinfo->size)
+ {
+ /* Only POSTROUTING CHAINS */
+ if (iter->comefrom != (0x1 << NF_INET_POST_ROUTING))
+ continue;
+ if ((iter->ip.proto != 17/*UDP */) &&
+ (iter->ip.proto != 6/*TCP */))
+ continue;
+
+ if (num == MAX_MARKER_RULES) {
+ printk(KERN_INFO "Maximum %d Rule permitted\n",
+ MAX_MARKER_RULES);
+ break;
+ }
+ m = (void *)iter + sizeof(struct ipt_entry);
+ t = (void *)iter + iter->target_offset;
+ if (0 != strcmp(t->u.kernel.target->name, "DSCP"))
+ continue;
+
+ rules[num].src_ip[0] = iter->ip.src.s_addr;
+ rules[num].dst_ip[0] = iter->ip.dst.s_addr;
+ rules[num].proto = iter->ip.proto;
+ /* We pass the port mask instead of the value, since mask == value.
+ But when ports are not configured, we get 0xFFFF to indicate that
+ ANY port value is accepted. */
+ sport = (uint16_t *)&m->data[2];
+ dport = (uint16_t *)&m->data[6];
+ rules[num].src_port = *sport;
+ rules[num].dst_port = *dport;
+ rules[num].uciDscp = (t->data[0] << 2);
+
+ num++;
+ }
+ if (num > 0) {
+ marker_db_t arg;
+
+ arg.rule = &rules[0];
+ arg.num_rules = num;
+ marker_add_fn(&arg);
+ }
+ }
+#endif
return ret;
}
@@ -976,7 +1049,6 @@ copy_entries_to_user(unsigned int total_size,
goto free_counters;
}
}
-
t = ipt_get_target_c(e);
if (copy_to_user(userptr + off + e->target_offset
+ offsetof(struct xt_entry_target,
@@ -1168,6 +1240,16 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
return ret;
}
+#ifdef CONFIG_AS_FASTPATH
+void (*pfnfirewall_asfctrl)(void);
+
+void hook_firewall_asfctrl_cb(const struct firewall_asfctrl *fwasfctrl)
+{
+ pfnfirewall_asfctrl = fwasfctrl->firewall_asfctrl_cb;
+}
+EXPORT_SYMBOL(hook_firewall_asfctrl_cb);
+#endif
+
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table_info *newinfo, unsigned int num_counters,
@@ -1232,6 +1314,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
}
vfree(counters);
xt_table_unlock(t);
+
+#ifdef CONFIG_AS_FASTPATH
+ /* Call the ASF CTRL CB */
+ if (!ret && pfnfirewall_asfctrl)
+ pfnfirewall_asfctrl();
+#endif
+
return ret;
put_module:
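A sketch of the consumer side of the marker registration added above; the IPv6 table below registers the same way through marker_v6_hook_fn_register(). The markerRule_t/marker_db_t layouts and the hook return types are assumptions inferred from their use in translate_table(), and the my_hw_* calls are hypothetical:

    static int my_marker_add(marker_db_t *db)
    {
        uint32_t i;

        /* db->rule[0..num_rules-1] holds the DSCP rules harvested from
         * the mangle table's POSTROUTING chain. */
        for (i = 0; i < db->num_rules; i++)
            my_hw_program_dscp(&db->rule[i]);
        return 0;
    }

    static void my_marker_flush(void)
    {
        my_hw_flush_dscp();
    }

    static int __init my_marker_init(void)
    {
        marker_v4_hook_fn_register(my_marker_add, my_marker_flush);
        return 0;
    }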
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f7fe946..15bd37d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -197,6 +197,10 @@ const __u8 ip_tos2prio[16] = {
};
EXPORT_SYMBOL(ip_tos2prio);
+#ifdef CONFIG_AS_FASTPATH
+static route_flush_hook *route_flush_fn;
+#endif
+
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
@@ -443,6 +447,10 @@ static inline bool rt_is_expired(const struct rtable *rth)
void rt_cache_flush(struct net *net)
{
rt_genid_bump_ipv4(net);
+#ifdef CONFIG_AS_FASTPATH
+ if (route_flush_fn)
+ route_flush_fn();
+#endif
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -2184,6 +2192,14 @@ out:
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
+#ifdef CONFIG_AS_FASTPATH
+void route_hook_fn_register(route_flush_hook *flush)
+{
+ route_flush_fn = flush;
+}
+EXPORT_SYMBOL(route_hook_fn_register);
+#endif
+
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
return NULL;
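The flush hook registered above is invoked from rt_cache_flush(), i.e. whenever the IPv4 routing generation is bumped, so an offload module can drop cached flows at the same point; net/ipv6/route.c below mirrors the shape with ipv6_route_hook_fn_register(). A sketch with a hypothetical flow-cache call:

    static void my_fp_route_flush(void)
    {
        /* The kernel invalidated its IPv4 routes (rt_genid bump);
         * drop any offloaded flows that cached them. */
        my_fp_invalidate_cached_flows();
    }

    static int __init my_fp_init(void)
    {
        route_hook_fn_register(my_fp_route_flush);
        return 0;
    }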
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a880ccc..e403405 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1426,7 +1426,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
do {
if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
last_issued, &done,
- &used) == DMA_SUCCESS) {
+ &used) == DMA_COMPLETE) {
/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
break;
@@ -1434,7 +1434,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
- used) == DMA_SUCCESS)) {
+ used) == DMA_COMPLETE)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 602533d..dc67e01 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -495,6 +495,7 @@ drop:
kfree_skb(skb);
return -EINVAL;
}
+EXPORT_SYMBOL(ip6_forward);
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 89a4e4d..bba4791 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -63,6 +63,18 @@ MODULE_DESCRIPTION("IPv6 packet filter");
#define static
#define inline
#endif
+#ifdef CONFIG_ASF_INGRESS_MARKER
+marker_add_hook *marker_v6_add_fn;
+marker_flush_hook *marker_v6_flush_fn;
+
+void marker_v6_hook_fn_register(marker_add_hook *add,
+ marker_flush_hook *flush)
+{
+ marker_v6_add_fn = add;
+ marker_v6_flush_fn = flush;
+}
+EXPORT_SYMBOL(marker_v6_hook_fn_register);
+#endif
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
@@ -878,6 +890,70 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
memcpy(newinfo->entries[i], entry0, newinfo->size);
}
+#ifdef CONFIG_ASF_INGRESS_MARKER
+ /* Rules have been verified; now safe to offload to ASF */
+ if (marker_v6_add_fn && (0 == strcmp(repl->name, "mangle"))) {
+ struct xt_entry_match *m;
+ struct xt_entry_target *t;
+ markerRule_t rules[MAX_MARKER_RULES] = {};
+ uint16_t *sport, *dport;
+ uint32_t num = 0;
+
+ /* Is this a FLUSH request? */
+ /* Note: num_entries always equals num_counters + 1 when adding rules,
+ while num_entries comes as the default value of '6' when a FLUSH is required */
+ if ((repl->num_entries == 6) && (repl->num_entries < repl->num_counters)) {
+ if (marker_v6_flush_fn)
+ marker_v6_flush_fn();
+ return ret;
+ }
+ xt_entry_foreach(iter, entry0, newinfo->size)
+ {
+ /* Only POSTROUTING CHAINS */
+ if (iter->comefrom != (0x1 << NF_INET_POST_ROUTING))
+ continue;
+ if ((iter->ipv6.proto != 17/*UDP */) && (iter->ipv6.proto != 6/*TCP */))
+ continue;
+
+ if (num == MAX_MARKER_RULES) {
+ printk(KERN_INFO "Maximum of %d rules permitted\n",
+ MAX_MARKER_RULES);
+ break;
+ }
+ m = (void *)iter + sizeof(struct ip6t_entry);
+ t = (void *)iter + iter->target_offset;
+ if (0 != strcmp(t->u.kernel.target->name, "DSCP"))
+ continue;
+
+ rules[num].src_ip[0] = iter->ipv6.src.in6_u.u6_addr32[0];
+ rules[num].src_ip[1] = iter->ipv6.src.in6_u.u6_addr32[1];
+ rules[num].src_ip[2] = iter->ipv6.src.in6_u.u6_addr32[2];
+ rules[num].src_ip[3] = iter->ipv6.src.in6_u.u6_addr32[3];
+ rules[num].dst_ip[0] = iter->ipv6.dst.in6_u.u6_addr32[0];
+ rules[num].dst_ip[1] = iter->ipv6.dst.in6_u.u6_addr32[1];
+ rules[num].dst_ip[2] = iter->ipv6.dst.in6_u.u6_addr32[2];
+ rules[num].dst_ip[3] = iter->ipv6.dst.in6_u.u6_addr32[3];
+ rules[num].proto = iter->ipv6.proto;
+ /* We pass the port mask instead of the value, since mask == value.
+ But when ports are not configured, we get 0xFFFF to indicate that
+ ANY port value is accepted. */
+ sport = (uint16_t *)&m->data[2];
+ dport = (uint16_t *)&m->data[6];
+ rules[num].src_port = *sport;
+ rules[num].dst_port = *dport;
+ rules[num].uciDscp = (t->data[0] << 2);
+
+ num++;
+ }
+ if (num > 0) {
+ marker_db_t arg;
+
+ arg.rule = &rules[0];
+ arg.num_rules = num;
+ marker_v6_add_fn(&arg);
+ }
+ }
+#endif
return ret;
}
@@ -1178,6 +1254,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
return ret;
}
+#ifdef CONFIG_AS_FASTPATH
+extern void (*pfnfirewall_asfctrl)(void);
+#endif
+
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table_info *newinfo, unsigned int num_counters,
@@ -1242,6 +1322,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
}
vfree(counters);
xt_table_unlock(t);
+
+#ifdef CONFIG_AS_FASTPATH
+ /* Call the ASF CTRL CB */
+ if (!ret && pfnfirewall_asfctrl)
+ pfnfirewall_asfctrl();
+#endif
+
return ret;
put_module:
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b4bb6a2..ddb4e3d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -65,6 +65,10 @@
#include <linux/sysctl.h>
#endif
+#ifdef CONFIG_AS_FASTPATH
+static ipv6_route_flush_hook *ipv6_route_flush_fn;
+#endif
+
enum rt6_nud_state {
RT6_NUD_FAIL_HARD = -2,
RT6_NUD_FAIL_SOFT = -1,
@@ -849,6 +853,11 @@ static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info)
err = fib6_add(&table->tb6_root, rt, info);
write_unlock_bh(&table->tb6_lock);
+#ifdef CONFIG_AS_FASTPATH
+ if ((!err) && ipv6_route_flush_fn)
+ ipv6_route_flush_fn();
+#endif
+
return err;
}
@@ -1005,6 +1014,7 @@ void ip6_route_input(struct sk_buff *skb)
skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}
+EXPORT_SYMBOL(ip6_route_input);
static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
struct flowi6 *fl6, int flags)
@@ -1714,6 +1724,12 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
out:
ip6_rt_put(rt);
+
+#ifdef CONFIG_AS_FASTPATH
+ if ((!err) && ipv6_route_flush_fn)
+ ipv6_route_flush_fn();
+#endif
+
return err;
}
@@ -3267,3 +3283,11 @@ void ip6_route_cleanup(void)
dst_entries_destroy(&ip6_dst_blackhole_ops);
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}
+
+#ifdef CONFIG_AS_FASTPATH
+void ipv6_route_hook_fn_register(ipv6_route_flush_hook *flush)
+{
+ ipv6_route_flush_fn = flush;
+}
+EXPORT_SYMBOL(ipv6_route_hook_fn_register);
+#endif
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 545f047..dc66136 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3049,6 +3049,24 @@ static u32 get_acqseq(void)
return res;
}
+static bool pfkey_is_alive(const struct km_event *c)
+{
+ struct netns_pfkey *net_pfkey = net_generic(c->net, pfkey_net_id);
+ struct sock *sk;
+ bool is_alive = false;
+
+ rcu_read_lock();
+ sk_for_each_rcu(sk, &net_pfkey->table) {
+ if (pfkey_sk(sk)->registered) {
+ is_alive = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_alive;
+}
+
static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
{
struct sk_buff *skb;
@@ -3773,6 +3791,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
.new_mapping = pfkey_send_new_mapping,
.notify_policy = pfkey_send_policy_notify,
.migrate = pfkey_send_migrate,
+ .is_alive = pfkey_is_alive,
};
static int __net_init pfkey_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 44d1ea3..891070e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -706,6 +706,48 @@ static bool tcp_in_window(const struct nf_conn *ct,
state->retrans = 0;
}
}
+
+#ifdef CONFIG_AS_FASTPATH
+ state->seen[dir].td_delta = receiver_offset;
+ state->seen[dir].td_rcvwin = win;
+ /* Setting Time stamp */
+ {
+ unsigned char *tcpopt;
+ unsigned char *endptr;
+ int optlen;
+ tcpopt = (unsigned char *)(tcph) + 20;
+ optlen = tcph->doff * 4 - 20;
+ if (optlen > 0) {
+ endptr = tcpopt + optlen;
+ while (tcpopt < endptr) {
+ if (tcpopt[1] <= 0)
+ break;
+
+ switch (*tcpopt) {
+ case TCPOPT_EOL:
+ case TCPOPT_NOP:
+ tcpopt++;
+ break;
+ case TCPOPT_MSS:
+ tcpopt += 4; /* 4 byte option length */
+ break;
+ case TCPOPT_WINDOW:
+ tcpopt += 3; /* 3 byte option length */
+ break;
+ case TCPOPT_TIMESTAMP:
+ state->seen[dir].td_tcptimestamp =
+ ntohl(*((__be32 *)
+ (tcpopt + 2)));
+ goto DONE;
+ default:
+ tcpopt += tcpopt[1];
+ break;
+ }
+ }
+ }
+ }
+DONE:
+#endif
res = true;
} else {
res = false;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 8302717..95cb9e7 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -18,6 +18,18 @@
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+static inline void _drr_add_hook(struct Qdisc *sch,
+ uint32_t classid,
+ uint32_t quantum);
+static inline void _drr_flush_hook(struct Qdisc *sch);
+
+/* Define ADD/DELETE Hooks */
+static drr_add_hook *drr_add_fn;
+static drr_flush_hook *drr_flush_fn;
+static invalidate_flows *drr_invalidate;
+#endif
+
struct drr_class {
struct Qdisc_class_common common;
unsigned int refcnt;
@@ -201,7 +213,10 @@ static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
if (cl != NULL)
cl->filter_cnt++;
-
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ if (drr_invalidate)
+ drr_invalidate();
+#endif
return (unsigned long)cl;
}
@@ -210,6 +225,10 @@ static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
struct drr_class *cl = (struct drr_class *)arg;
cl->filter_cnt--;
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ if (drr_invalidate)
+ drr_invalidate();
+#endif
}
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
@@ -262,6 +281,9 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
goto nla_put_failure;
if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
goto nla_put_failure;
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ _drr_add_hook(sch, cl->common.classid, cl->quantum);
+#endif
return nla_nest_end(skb, nest);
nla_put_failure:
@@ -442,6 +464,9 @@ static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
return err;
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ _drr_add_hook(sch, 0, 0);
+#endif
INIT_LIST_HEAD(&q->active);
return 0;
}
@@ -477,7 +502,71 @@ static void drr_destroy_qdisc(struct Qdisc *sch)
drr_destroy_class(sch, cl);
}
qdisc_class_hash_destroy(&q->clhash);
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ _drr_flush_hook(sch);
+#endif
+}
+
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+static inline void _drr_add_hook(
+ struct Qdisc *sch,
+ uint32_t classid,
+ uint32_t quantum)
+{
+ int ret;
+
+ if (drr_add_fn) {
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (classid)
+ ret = drr_add_fn(dev, classid, sch->handle, quantum);
+ else
+ ret = drr_add_fn(dev, sch->handle, sch->parent, 0);
+
+ if (ret < 0)
+ printk(KERN_DEBUG "%s: DRR Creation on %s:"
+ " fail: handle 0x%X\n",
+ __func__, dev->name, sch->handle);
+ }
+}
+
+static inline void _drr_flush_hook(struct Qdisc *sch)
+{
+
+ if (drr_flush_fn) {
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (drr_flush_fn(dev, sch->handle, sch->parent) < 0) {
+ printk(KERN_DEBUG "%s: DRR Fush on %s: fail: handle 0x%X\n",
+ __func__, dev->name, sch->handle);
+ }
+ }
+}
+
+u32 drr_filter_lookup(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct drr_class *cl;
+ int err;
+
+ cl = drr_classify(skb, sch, &err);
+ if (cl == NULL) {
+ /* Rule not found, must DROP */
+ return 0;
+ }
+ return cl->common.classid;
}
+EXPORT_SYMBOL(drr_filter_lookup);
+
+void drr_hook_fn_register(drr_add_hook *add,
+ drr_flush_hook *flush,
+ invalidate_flows *invalidate)
+{
+ drr_add_fn = add;
+ drr_flush_fn = flush;
+ drr_invalidate = invalidate;
+}
+EXPORT_SYMBOL(drr_hook_fn_register);
+#endif
static const struct Qdisc_class_ops drr_class_ops = {
.change = drr_change_class,
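The DRR hooks above give an offload module three entry points: add (called per-qdisc as (handle, parent, 0) and per-class as (classid, handle, quantum)), flush, and an invalidate callback fired whenever filters are bound or unbound; drr_filter_lookup() additionally lets the fastpath classify an skb through the kernel's filters, with 0 meaning drop. A registration sketch, with hypothetical my_hw_*/my_fp_* helpers:

    static int my_drr_add(struct net_device *dev, u32 classid,
                          u32 handle, u32 quantum)
    {
        return my_hw_drr_config(dev, classid, handle, quantum);
    }

    static int my_drr_flush(struct net_device *dev, u32 handle, u32 parent)
    {
        return my_hw_drr_teardown(dev, handle, parent);
    }

    static void my_drr_invalidate(void)
    {
        my_fp_invalidate_classified_flows();  /* filters changed */
    }

    static int __init my_sched_init(void)
    {
        drr_hook_fn_register(my_drr_add, my_drr_flush, my_drr_invalidate);
        return 0;
    }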
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1dcebc9..e87e6f9 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -248,8 +248,23 @@ static void dev_watchdog(unsigned long arg)
txq = netdev_get_tx_queue(dev, i);
/*
* old device drivers set dev->trans_start
+ *
+ * (Actually, not only "old" devices, but also
+ * those which perform queue management in a
+ * separate hw accelerator. So even though the
+ * net device itself is single-queued, it makes
+ * sense (and is safe, too) to use kernel's
+ * multiqueue interface, specifically to avoid
+ * unnecessary device locking in SMP systems.
+ * In this case, we ought to consider not an
+ * individual txq's timestamp as a congestion
+ * indicator, but the "old" per-netdev field.)
*/
- trans_start = txq->trans_start ? : dev->trans_start;
+ if (dev->features & NETIF_F_HW_ACCEL_MQ)
+ trans_start = dev->trans_start;
+ else
+ trans_start = txq->trans_start ? :
+ dev->trans_start;
if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
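The watchdog change keys off NETIF_F_HW_ACCEL_MQ: for devices whose queues are managed by a hardware accelerator, the per-netdev trans_start stays authoritative instead of each txq's. A sketch of the driver side, with hypothetical names:

    static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
    {
        my_hw_enqueue(skb);          /* hand off to the accelerator */
        dev->trans_start = jiffies;  /* the watchdog reads this field */
        return NETDEV_TX_OK;
    }

    static void my_setup(struct net_device *dev)
    {
        dev->features |= NETIF_F_HW_ACCEL_MQ;
    }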
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 79359b6..868ab9a 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -22,6 +22,16 @@
#include <net/pkt_sched.h>
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+static inline void _prio_add_hook(struct Qdisc *sch,
+ uint32_t bands);
+static inline void _prio_flush_hook(struct Qdisc *sch);
+
+/* Define ADD/DELETE Hooks */
+static prio_add_hook *prio_add_fn;
+static prio_flush_hook *prio_flush_fn;
+#endif
+
struct prio_sched_data {
int bands;
struct tcf_proto *filter_list;
@@ -161,6 +171,11 @@ prio_destroy(struct Qdisc *sch)
tcf_destroy_chain(&q->filter_list);
for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
+
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ /* Invoke PRIO Qdisc deletion */
+ _prio_flush_hook(sch);
+#endif
}
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
@@ -234,6 +249,12 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
if ((err = prio_tune(sch, opt)) != 0)
return err;
+
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+ /* PRIO Qdisc creation is complete, now safe to offload */
+ _prio_add_hook(sch, q->bands);
+#endif
+
}
return 0;
}
@@ -360,6 +381,45 @@ static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
return &q->filter_list;
}
+#if defined(CONFIG_ASF_EGRESS_SCH) || defined(CONFIG_ASF_HW_SCH)
+static inline void _prio_add_hook(
+ struct Qdisc *sch,
+ uint32_t bands)
+{
+ if (prio_add_fn) {
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (prio_add_fn(dev, sch->handle, sch->parent, bands) < 0) {
+ printk(KERN_DEBUG "%s: PRIO Creation on %s: fail: handle 0x%X\n",
+ __func__, dev->name, sch->handle);
+ }
+ }
+}
+
+static inline void _prio_flush_hook(struct Qdisc *sch)
+{
+
+ if (prio_flush_fn) {
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (prio_flush_fn(dev, sch->handle, sch->parent) < 0) {
+ printk(KERN_DEBUG "%s: PRIO Fush on %s: fail: handle 0x%X\n",
+ __func__, dev->name, sch->handle);
+ }
+ }
+}
+
+
+void prio_hook_fn_register(prio_add_hook *add,
+ prio_flush_hook *flush)
+{
+ prio_add_fn = add;
+ prio_flush_fn = flush;
+}
+EXPORT_SYMBOL(prio_hook_fn_register);
+#endif
+
+
static const struct Qdisc_class_ops prio_class_ops = {
.graft = prio_graft,
.leaf = prio_leaf,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index fecd35a..29e1c2a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -98,6 +98,19 @@
changed the limit is not effective anymore.
*/
+#if defined(CONFIG_ASF_EGRESS_SHAPER) || defined(CONFIG_ASF_HW_SHAPER)
+static inline void _tbf_add_hook(struct Qdisc *sch,
+ uint32_t rate,
+ uint32_t limit,
+ uint32_t buffer,
+ uint16_t mpu);
+static inline void _tbf_del_hook(struct Qdisc *sch);
+
+/* Define ADD/DELETE Hooks */
+static tbf_add_hook *tbf_add_fn;
+static tbf_del_hook *tbf_del_fn;
+#endif
+
struct tbf_sched_data {
/* Parameters */
u32 limit; /* Maximal length of backlog: bytes */
@@ -362,6 +375,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_unlock(sch);
err = 0;
+#if defined(CONFIG_ASF_EGRESS_SHAPER) || defined(CONFIG_ASF_HW_SHAPER)
+ _tbf_add_hook(sch, qopt->rate.rate, qopt->limit,
+ qopt->buffer, qopt->rate.mpu);
+#endif
+
done:
if (rtab)
qdisc_put_rtab(rtab);
@@ -390,6 +408,9 @@ static void tbf_destroy(struct Qdisc *sch)
qdisc_watchdog_cancel(&q->watchdog);
qdisc_destroy(q->qdisc);
+#if defined(CONFIG_ASF_EGRESS_SHAPER) || defined(CONFIG_ASF_HW_SHAPER)
+ _tbf_del_hook(sch);
+#endif
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -478,6 +499,60 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
+#if defined(CONFIG_ASF_EGRESS_SHAPER) || defined(CONFIG_ASF_HW_SHAPER)
+static inline void _tbf_add_hook(struct Qdisc *sch,
+ uint32_t rate,
+ uint32_t limit,
+ uint32_t buffer,
+ uint16_t mpu
+)
+{
+ if (tbf_add_fn) {
+ struct tbf_opt opt;
+
+ opt.dev = qdisc_dev(sch);
+ opt.handle = sch->handle;
+ opt.parent = sch->parent;
+ opt.rate = rate;
+ opt.limit = limit;
+ opt.buffer = buffer;
+ opt.mpu = mpu;
+
+ if (tbf_add_fn(&opt) < 0) {
+ printk(KERN_DEBUG "%s: TBF Creation on %s: fail: handle 0x%X\n",
+ __func__, opt.dev->name, sch->handle);
+ }
+ }
+}
+
+static inline void _tbf_del_hook(struct Qdisc *sch)
+{
+
+ if (tbf_del_fn) {
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (tbf_del_fn(dev, sch->handle, sch->parent) < 0) {
+ printk(KERN_DEBUG "%s: TBF Qdisc DEL on %s: fail: handle 0x%X\n",
+ __func__, dev->name, sch->handle);
+ }
+ }
+}
+struct Qdisc *tbf_get_inner_qdisc(struct Qdisc *sch)
+{
+ struct tbf_sched_data *q = qdisc_priv(sch);
+ return q->qdisc;
+}
+EXPORT_SYMBOL(tbf_get_inner_qdisc);
+
+void tbf_hook_fn_register(tbf_add_hook *add,
+ tbf_del_hook *del)
+{
+ tbf_add_fn = add;
+ tbf_del_fn = del;
+}
+EXPORT_SYMBOL(tbf_hook_fn_register);
+#endif
+
static const struct Qdisc_class_ops tbf_class_ops = {
.graft = tbf_graft,
.leaf = tbf_leaf,
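Unlike the PRIO/DRR hooks, the TBF add hook receives its parameters in a struct; the field names below are taken from how _tbf_add_hook() fills them in, tbf_get_inner_qdisc() exposes the child qdisc to the offload path, and the my_hw_* calls are hypothetical:

    static int my_tbf_add(struct tbf_opt *opt)
    {
        return my_hw_shaper_set(opt->dev, opt->handle, opt->parent,
                                opt->rate, opt->limit, opt->buffer,
                                opt->mpu);
    }

    static int my_tbf_del(struct net_device *dev, u32 handle, u32 parent)
    {
        return my_hw_shaper_clear(dev, handle, parent);
    }

    static int __init my_shaper_init(void)
    {
        tbf_hook_fn_register(my_tbf_add, my_tbf_del);
        return 0;
    }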
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 1e6081f..d64486e 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
rep_nlh = nlmsg_hdr(rep_buf);
memcpy(rep_nlh, req_nlh, hdr_space);
rep_nlh->nlmsg_len = rep_buf->len;
- genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
+ genlmsg_unicast(genl_info_net(info), rep_buf, NETLINK_CB(skb).portid);
}
return 0;
@@ -74,6 +74,7 @@ static struct genl_family tipc_genl_family = {
.version = TIPC_GENL_VERSION,
.hdrsize = TIPC_GENL_HDRLEN,
.maxattr = 0,
+ .netnsok = true,
};
static struct genl_ops tipc_genl_ops = {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 8884399..677ff52 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -178,6 +178,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop_unlock;
}
+#ifdef CONFIG_AS_FASTPATH
+ if (!x->asf_sa_cookie && asf_cb_fns.ipsec_dec_hook)
+ asf_cb_fns.ipsec_dec_hook(NULL, x, NULL, skb->skb_iif);
+
+ spin_unlock(&x->lock);
+ if (x->asf_sa_cookie && asf_cb_fns.ipsec_decrypt_n_send) {
+ if (!asf_cb_fns.ipsec_decrypt_n_send(skb, x))
+ return 0;
+ }
+ spin_lock(&x->lock);
+#endif
+
if (x->repl->check(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3bb2cdc..9a1e078 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -54,6 +54,17 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
goto error_nolock;
}
+#ifdef CONFIG_AS_FASTPATH
+ if (!x->asf_sa_cookie && asf_cb_fns.ipsec_enc_hook)
+ asf_cb_fns.ipsec_enc_hook(NULL, x, NULL, skb->skb_iif);
+
+ if (x->asf_sa_cookie && asf_cb_fns.ipsec_encrypt_n_send) {
+ err = -EINPROGRESS;
+ if (!asf_cb_fns.ipsec_encrypt_n_send(skb, x))
+ goto out;
+ }
+#endif
+
err = x->outer_mode->output(x, skb);
if (err) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 6b07a59..823e48c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -65,6 +65,33 @@ static void xfrm_policy_queue_process(unsigned long arg);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
+#ifdef CONFIG_AS_FASTPATH
+struct asf_ipsec_callbackfn_s asf_cb_fns = {0};
+
+void register_ipsec_offload_hook(struct asf_ipsec_callbackfn_s *p_fn_list)
+{
+ asf_cb_fns.ipsec_enc_hook = p_fn_list->ipsec_enc_hook;
+ asf_cb_fns.ipsec_dec_hook = p_fn_list->ipsec_dec_hook;
+ asf_cb_fns.ipsec_sync_sa = p_fn_list->ipsec_sync_sa;
+ asf_cb_fns.ipsec_encrypt_n_send
+ = p_fn_list->ipsec_encrypt_n_send;
+ asf_cb_fns.ipsec_decrypt_n_send
+ = p_fn_list->ipsec_decrypt_n_send;
+
+}
+EXPORT_SYMBOL(register_ipsec_offload_hook);
+
+void unregister_ipsec_offload_hook(void)
+{
+ asf_cb_fns.ipsec_enc_hook = NULL;
+ asf_cb_fns.ipsec_dec_hook = NULL;
+ asf_cb_fns.ipsec_sync_sa = NULL;
+ asf_cb_fns.ipsec_encrypt_n_send = NULL;
+ asf_cb_fns.ipsec_decrypt_n_send = NULL;
+}
+EXPORT_SYMBOL(unregister_ipsec_offload_hook);
+#endif /* CONFIG_AS_FASTPATH */
+
static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
@@ -678,6 +705,11 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
__xfrm_policy_unlink(delpol, dir);
}
policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
+
+#ifdef CONFIG_AS_FASTPATH
+ policy->asf_cookie = delpol ? delpol->asf_cookie : 0;
+#endif
+
hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
policy->curlft.add_time = get_seconds();
policy->curlft.use_time = 0;
@@ -1048,6 +1080,7 @@ __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir
#endif
return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}
+EXPORT_SYMBOL(__xfrm_policy_lookup);
static int flow_to_policy_dir(int dir)
{
@@ -1236,6 +1269,11 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
newp->xfrm_nr = old->xfrm_nr;
newp->index = old->index;
newp->type = old->type;
+
+#ifdef CONFIG_AS_FASTPATH
+ newp->asf_cookie = old->asf_cookie;
+#endif
+
memcpy(newp->xfrm_vec, old->xfrm_vec,
newp->xfrm_nr*sizeof(struct xfrm_tmpl));
write_lock_bh(&xfrm_policy_lock);
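A registration sketch for the IPsec offload callbacks consumed in xfrm_input.c and xfrm_output.c above. The struct layout follows register_ipsec_offload_hook(); the return convention for the *_n_send hooks (0 = packet consumed) is taken from the call sites, and the my_* handlers are hypothetical:

    static struct asf_ipsec_callbackfn_s my_ipsec_fns = {
        .ipsec_enc_hook        = my_enc_sa_notify,
        .ipsec_dec_hook        = my_dec_sa_notify,
        .ipsec_sync_sa         = my_sa_sync,
        .ipsec_encrypt_n_send  = my_encrypt_n_send,  /* 0 = consumed */
        .ipsec_decrypt_n_send  = my_decrypt_n_send,  /* 0 = consumed */
    };

    static int __init my_ipsec_init(void)
    {
        register_ipsec_offload_hook(&my_ipsec_fns);
        return 0;
    }

    static void __exit my_ipsec_exit(void)
    {
        unregister_ipsec_offload_hook();
    }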
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b9c3f9e..a5bc192 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -163,6 +163,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
+bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
static DEFINE_SPINLOCK(xfrm_type_lock);
@@ -798,6 +799,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
unsigned short encap_family = tmpl->encap_family;
+ struct km_event c;
to_put = NULL;
@@ -842,6 +844,17 @@ found:
error = -EEXIST;
goto out;
}
+
+ c.net = net;
+ /* If the KMs have no listeners (yet...), avoid allocating an SA
+ * for each and every packet - garbage collection might not
+ * handle the flood.
+ */
+ if (!km_is_alive(&c)) {
+ error = -ESRCH;
+ goto out;
+ }
+
x = xfrm_state_alloc(net);
if (x == NULL) {
error = -ENOMEM;
@@ -1627,6 +1640,69 @@ static void xfrm_replay_timer_handler(unsigned long data)
spin_unlock(&x->lock);
}
+#ifdef CONFIG_AS_FASTPATH
+struct xfrm_policy *xfrm_state_policy_mapping(struct xfrm_state *xfrm)
+{
+ struct xfrm_policy *xp = NULL, *matched_pol = NULL;
+ struct net *xfrm_net = xs_net(xfrm);
+ struct list_head *list_policy_head = &xfrm_net->xfrm.policy_all;
+ struct xfrm_policy_walk_entry *x;
+ struct xfrm_tmpl *tmpl;
+ unsigned int dir;
+
+ if (!list_policy_head) {
+ printk(KERN_INFO "No Security Policies in the system\n");
+ return matched_pol;
+ }
+ x = list_first_entry(list_policy_head,
+ struct xfrm_policy_walk_entry, all);
+ if (!x) {
+ printk(KERN_INFO "Security Policies list is empty\n");
+ return matched_pol;
+ }
+ if (xfrm->props.family == AF_INET) {
+ list_for_each_entry_from(x, list_policy_head, all) {
+ if (x->dead)
+ continue;
+ xp = container_of(x, struct xfrm_policy, walk);
+ tmpl = &xp->xfrm_vec[0];
+ dir = xfrm_policy_id2dir(xp->index);
+ if (dir <= XFRM_POLICY_OUT &&
+ tmpl->id.daddr.a4 == xfrm->id.daddr.a4 &&
+ tmpl->saddr.a4 == xfrm->props.saddr.a4 &&
+ xfrm->props.reqid == tmpl->reqid &&
+ xfrm->props.mode == tmpl->mode) {
+ matched_pol = xp;
+ xfrm->asf_sa_direction = dir;
+ break;
+ }
+ }
+ } else if (xfrm->props.family == AF_INET6) {
+ list_for_each_entry_from(x, list_policy_head, all) {
+ if (x->dead)
+ continue;
+ xp = container_of(x, struct xfrm_policy, walk);
+ tmpl = &xp->xfrm_vec[0];
+ dir = xfrm_policy_id2dir(xp->index);
+ if (dir <= XFRM_POLICY_OUT &&
+ !memcmp(tmpl->id.daddr.a6,
+ xfrm->id.daddr.a6, 16) &&
+ !memcmp(tmpl->saddr.a6,
+ xfrm->props.saddr.a6, 16) &&
+ xfrm->props.reqid == tmpl->reqid &&
+ xfrm->props.mode == tmpl->mode) {
+ matched_pol = xp;
+ xfrm->asf_sa_direction = dir;
+ break;
+ }
+ }
+ } else
+ return NULL;
+
+ return matched_pol;
+}
+EXPORT_SYMBOL(xfrm_state_policy_mapping);
+#endif
static LIST_HEAD(xfrm_km_list);
void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -1762,6 +1838,24 @@ int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address
}
EXPORT_SYMBOL(km_report);
+bool km_is_alive(const struct km_event *c)
+{
+ struct xfrm_mgr *km;
+ bool is_alive = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(km, &xfrm_km_list, list) {
+ if (km->is_alive && km->is_alive(c)) {
+ is_alive = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_alive;
+}
+EXPORT_SYMBOL(km_is_alive);
+
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
int err;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 32a2dd3..716ee00 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -3001,6 +3001,11 @@ static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
}
+static bool xfrm_is_alive(const struct km_event *c)
+{
+ return (bool)xfrm_acquire_is_on(c->net);
+}
+
static struct xfrm_mgr netlink_mgr = {
.id = "netlink",
.notify = xfrm_send_state_notify,
@@ -3010,6 +3015,7 @@ static struct xfrm_mgr netlink_mgr = {
.report = xfrm_send_report,
.migrate = xfrm_send_migrate,
.new_mapping = xfrm_send_mapping,
+ .is_alive = xfrm_is_alive,
};
static int __net_init xfrm_user_net_init(struct net *net)