author    | David S. Miller <davem@davemloft.net> | 2012-07-20 18:07:37 (GMT)
committer | David S. Miller <davem@davemloft.net> | 2012-07-20 18:07:37 (GMT)
commit    | aac3942cedc339b1e7b6bad28f3abe4ceb15bcc3 (patch)
tree      | 49abe1f74a0a3290cf611878a704914a9f5e6577
parent    | 6f458dfb409272082c9bfa412f77ff2fc21c626f (diff)
parent    | 6c85f2bdda2086d804e198a3f31b685bc2f86b04 (diff)
download  | linux-aac3942cedc339b1e7b6bad28f3abe4ceb15bcc3.tar.xz
Merge branch 'team_multiq'
Jiri Pirko says:
====================
This patchset represents the path I took while adding multiqueue
support to the team driver.
Jiri Pirko (6):
net: honour netif_set_real_num_tx_queues() retval
rtnl: allow to specify different num for rx and tx queue count
rtnl: allow to specify number of rx and tx queues on device creation
net: rename bond_queue_mapping to slave_dev_queue_mapping
bond_sysfs: use real_num_tx_queues rather than params.tx_queue
team: add multiqueue support
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
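The driver-facing piece of the series is the pair of rtnl_link_ops hooks (get_num_tx_queues/get_num_rx_queues) added in include/net/rtnetlink.h below. As a rough sketch of how a virtual-device driver is expected to use them — a hypothetical "foo" driver, not code from this series — it advertises default queue counts which rtnl_create_link() uses unless userspace passes explicit IFLA_NUM_TX_QUEUES/IFLA_NUM_RX_QUEUES attributes:

```c
#include <net/rtnetlink.h>

/* Hypothetical example driver ("foo"); foo_priv and foo_setup are assumed
 * to exist elsewhere. The hooks only supply defaults; an explicit
 * IFLA_NUM_TX_QUEUES/IFLA_NUM_RX_QUEUES from userspace overrides them.
 */
static unsigned int foo_get_num_tx_queues(void)
{
	return 16;
}

static unsigned int foo_get_num_rx_queues(void)
{
	return 16;
}

static struct rtnl_link_ops foo_link_ops __read_mostly = {
	.kind			= "foo",
	.priv_size		= sizeof(struct foo_priv),
	.setup			= foo_setup,
	.get_num_tx_queues	= foo_get_num_tx_queues,
	.get_num_rx_queues	= foo_get_num_rx_queues,
};
```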
-rw-r--r-- | drivers/net/bonding/bond_main.c  | 20
-rw-r--r-- | drivers/net/bonding/bond_sysfs.c |  2
-rw-r--r-- | drivers/net/team/team.c          | 65
-rw-r--r-- | include/linux/if_link.h          |  2
-rw-r--r-- | include/linux/if_team.h          |  8
-rw-r--r-- | include/linux/netdevice.h        |  7
-rw-r--r-- | include/net/rtnetlink.h          | 10
-rw-r--r-- | include/net/sch_generic.h        |  2
-rw-r--r-- | net/core/rtnetlink.c             | 27
9 files changed, 114 insertions, 29 deletions
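Before reading the diff, it may help to see how the queue counts are chosen at link-creation time once this series is applied. The sketch below is a simplified restatement of the new logic in the net/core/rtnetlink.c hunk further down, not separate code: an explicit attribute from userspace wins, then the driver's hook, then the old default of a single queue.

```c
/* Simplified restatement of the queue-count selection the series adds to
 * rtnl_create_link() (see the net/core/rtnetlink.c hunk below); the RX side
 * is symmetrical, using IFLA_NUM_RX_QUEUES and get_num_rx_queues().
 */
static unsigned int pick_num_tx_queues(struct nlattr *tb[],
				       const struct rtnl_link_ops *ops)
{
	if (tb[IFLA_NUM_TX_QUEUES])		/* explicit userspace request */
		return nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
	if (ops->get_num_tx_queues)		/* driver-provided default */
		return ops->get_num_tx_queues();
	return 1;				/* previous behaviour */
}
```

Newer iproute2 exposes the two attributes as the numtxqueues/numrxqueues arguments of ip link add, assuming a version recent enough to know about them.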
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3960b1b..6fae5f3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -395,8 +395,8 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 	skb->dev = slave_dev;
 
 	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
-		     sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
-	skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
+		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
 
 	if (unlikely(netpoll_tx_running(slave_dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4184,7 +4184,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 	/*
 	 * Save the original txq to restore before passing to the driver
 	 */
-	qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
+	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
 	if (unlikely(txq >= dev->real_num_tx_queues)) {
 		do {
@@ -4845,17 +4845,19 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }
 
-static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
+static unsigned int bond_get_num_tx_queues(void)
 {
 	return tx_queues;
 }
 
 static struct rtnl_link_ops bond_link_ops __read_mostly = {
-	.kind		= "bond",
-	.priv_size	= sizeof(struct bonding),
-	.setup		= bond_setup,
-	.validate	= bond_validate,
-	.get_tx_queues	= bond_get_tx_queues,
+	.kind			= "bond",
+	.priv_size		= sizeof(struct bonding),
+	.setup			= bond_setup,
+	.validate		= bond_validate,
+	.get_num_tx_queues	= bond_get_num_tx_queues,
+	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
+							     as for TX queues */
 };
 
 /* Create a new bond based on the specified name and bonding parameters.
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 485bedb..dc15d24 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1495,7 +1495,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 
 	/* Check buffer length, valid ifname and queue id */
 	if (strlen(buffer) > IFNAMSIZ || !dev_valid_name(buffer) ||
-	    qid > bond->params.tx_queues)
+	    qid > bond->dev->real_num_tx_queues)
 		goto err_no_cmd;
 
 	/* Get the pointer to that interface if it exists */
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 813e131..b104c05 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -27,6 +27,7 @@
 #include <net/rtnetlink.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <net/sch_generic.h>
 #include <linux/if_team.h>
 
 #define DRV_NAME "team"
@@ -1121,6 +1122,22 @@ static const struct team_option team_options[] = {
 	},
 };
 
+static struct lock_class_key team_netdev_xmit_lock_key;
+static struct lock_class_key team_netdev_addr_lock_key;
+
+static void team_set_lockdep_class_one(struct net_device *dev,
+				       struct netdev_queue *txq,
+				       void *unused)
+{
+	lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
+}
+
+static void team_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
+	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
+}
+
 static int team_init(struct net_device *dev)
 {
 	struct team *team = netdev_priv(dev);
@@ -1148,6 +1165,8 @@ static int team_init(struct net_device *dev)
 		goto err_options_register;
 	netif_carrier_off(dev);
 
+	team_set_lockdep_class(dev);
+
 	return 0;
 
 err_options_register:
@@ -1216,6 +1235,29 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	/*
+	 * This helper function exists to help dev_pick_tx get the correct
+	 * destination queue.  Using a helper function skips a call to
+	 * skb_tx_hash and will put the skbs in the queue we expect on their
+	 * way down to the team driver.
+	 */
+	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+
+	/*
+	 * Save the original txq to restore before passing to the driver
+	 */
+	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+
+	if (unlikely(txq >= dev->real_num_tx_queues)) {
+		do {
+			txq -= dev->real_num_tx_queues;
+		} while (txq >= dev->real_num_tx_queues);
+	}
+	return txq;
+}
+
 static void team_change_rx_flags(struct net_device *dev, int change)
 {
 	struct team *team = netdev_priv(dev);
@@ -1469,6 +1511,7 @@ static const struct net_device_ops team_netdev_ops = {
 	.ndo_open		= team_open,
 	.ndo_stop		= team_close,
 	.ndo_start_xmit		= team_xmit,
+	.ndo_select_queue	= team_select_queue,
 	.ndo_change_rx_flags	= team_change_rx_flags,
 	.ndo_set_rx_mode	= team_set_rx_mode,
 	.ndo_set_mac_address	= team_set_mac_address,
@@ -1543,12 +1586,24 @@ static int team_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }
 
+static unsigned int team_get_num_tx_queues(void)
+{
+	return TEAM_DEFAULT_NUM_TX_QUEUES;
+}
+
+static unsigned int team_get_num_rx_queues(void)
+{
+	return TEAM_DEFAULT_NUM_RX_QUEUES;
+}
+
 static struct rtnl_link_ops team_link_ops __read_mostly = {
-	.kind		= DRV_NAME,
-	.priv_size	= sizeof(struct team),
-	.setup		= team_setup,
-	.newlink	= team_newlink,
-	.validate	= team_validate,
+	.kind			= DRV_NAME,
+	.priv_size		= sizeof(struct team),
+	.setup			= team_setup,
+	.newlink		= team_newlink,
+	.validate		= team_validate,
+	.get_num_tx_queues	= team_get_num_tx_queues,
+	.get_num_rx_queues	= team_get_num_rx_queues,
 };
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f715750..ac173bd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -140,6 +140,8 @@ enum {
 	IFLA_EXT_MASK,		/* Extended info mask, VFs, etc */
 	IFLA_PROMISCUITY,	/* Promiscuity count: > 0 means acts PROMISC */
 #define IFLA_PROMISCUITY IFLA_PROMISCUITY
+	IFLA_NUM_TX_QUEUES,
+	IFLA_NUM_RX_QUEUES,
 	__IFLA_MAX
 };
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 7fd0cde..6960fc1 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -14,6 +14,7 @@
 #ifdef __KERNEL__
 
 #include <linux/netpoll.h>
+#include <net/sch_generic.h>
 
 struct team_pcpu_stats {
 	u64			rx_packets;
@@ -98,6 +99,10 @@ static inline void team_netpoll_send_skb(struct team_port *port,
 static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
 				      struct sk_buff *skb)
 {
+	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
 	skb->dev = port->dev;
 	if (unlikely(netpoll_tx_running(port->dev))) {
 		team_netpoll_send_skb(port, skb);
@@ -236,6 +241,9 @@ extern void team_options_unregister(struct team *team,
 extern int team_mode_register(const struct team_mode *mode);
 extern void team_mode_unregister(const struct team_mode *mode);
 
+#define TEAM_DEFAULT_NUM_TX_QUEUES 16
+#define TEAM_DEFAULT_NUM_RX_QUEUES 16
+
 #endif /* __KERNEL__ */
 
 #define TEAM_STRING_MAX_LEN 32
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ab0251d..eb06e58 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2110,7 +2110,12 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 					     const struct net_device *from_dev)
 {
-	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
+	int err;
+
+	err = netif_set_real_num_tx_queues(to_dev,
+					   from_dev->real_num_tx_queues);
+	if (err)
+		return err;
 #ifdef CONFIG_RPS
 	return netif_set_real_num_rx_queues(to_dev,
 					    from_dev->real_num_rx_queues);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index bbcfd09..6b00c4f 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -44,8 +44,10 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
  *	@get_xstats_size: Function to calculate required room for dumping device
  *			  specific statistics
  *	@fill_xstats: Function to dump device specific statistics
- *	@get_tx_queues: Function to determine number of transmit queues to create when
- *			creating a new device.
+ *	@get_num_tx_queues: Function to determine number of transmit queues
+ *			    to create when creating a new device.
+ *	@get_num_rx_queues: Function to determine number of receive queues
+ *			    to create when creating a new device.
  */
 struct rtnl_link_ops {
 	struct list_head	list;
@@ -77,8 +79,8 @@ struct rtnl_link_ops {
 	size_t			(*get_xstats_size)(const struct net_device *dev);
 	int			(*fill_xstats)(struct sk_buff *skb,
 					       const struct net_device *dev);
-	int			(*get_tx_queues)(struct net *net,
-						 struct nlattr *tb[]);
+	unsigned int		(*get_num_tx_queues)(void);
+	unsigned int		(*get_num_rx_queues)(void);
 };
 
 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9d7d54a..d9611e0 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,7 +220,7 @@ struct tcf_proto {
 
 struct qdisc_skb_cb {
 	unsigned int		pkt_len;
-	u16			bond_queue_mapping;
+	u16			slave_dev_queue_mapping;
 	u16			_pad;
 	unsigned char		data[20];
 };
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 045db8a..5bb1ebc 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -771,6 +771,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(4) /* IFLA_LINK */
 	       + nla_total_size(4) /* IFLA_MASTER */
 	       + nla_total_size(4) /* IFLA_PROMISCUITY */
+	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(ext_filter_mask
@@ -889,6 +891,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
 	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
 	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
+	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
 	    (dev->ifindex != dev->iflink &&
 	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
 	    (dev->master &&
@@ -1106,6 +1110,8 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
 	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
 	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
+	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
+	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
 };
 EXPORT_SYMBOL(ifla_policy);
@@ -1624,17 +1630,22 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 {
 	int err;
 	struct net_device *dev;
-	unsigned int num_queues = 1;
+	unsigned int num_tx_queues = 1;
+	unsigned int num_rx_queues = 1;
 
-	if (ops->get_tx_queues) {
-		err = ops->get_tx_queues(src_net, tb);
-		if (err < 0)
-			goto err;
-		num_queues = err;
-	}
+	if (tb[IFLA_NUM_TX_QUEUES])
+		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
+	else if (ops->get_num_tx_queues)
+		num_tx_queues = ops->get_num_tx_queues();
+
+	if (tb[IFLA_NUM_RX_QUEUES])
+		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
+	else if (ops->get_num_rx_queues)
+		num_rx_queues = ops->get_num_rx_queues();
 
 	err = -ENOMEM;
-	dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
+	dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
+			       num_tx_queues, num_rx_queues);
 	if (!dev)
 		goto err;
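One detail worth noting about team_select_queue() above: mapping traffic back to the TX queue matching its RX queue only works if something on the receive path recorded that queue in the skb. Physical NIC drivers typically do this per RX ring; the sketch below (a hypothetical foo_rx_one(), not part of this series) shows the producer side that skb_rx_queue_recorded()/skb_get_rx_queue() rely on.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-ring receive completion in a NIC driver. Recording the
 * RX queue here is what later lets team_select_queue()/bond_select_queue()
 * read it back with skb_rx_queue_recorded()/skb_get_rx_queue() and pick a
 * matching TX queue on the aggregated device.
 */
static void foo_rx_one(struct napi_struct *napi, struct sk_buff *skb,
		       u16 ring_index)
{
	skb_record_rx_queue(skb, ring_index);
	napi_gro_receive(napi, skb);
}
```

On the way back down, team_dev_queue_xmit() (in the include/linux/if_team.h hunk) restores the queue_mapping that team_select_queue() stashed in qdisc_skb_cb(skb)->slave_dev_queue_mapping before handing the skb to the port device.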