Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h | 128
1 file changed, 78 insertions(+), 50 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7dfaae7..1f77540 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -33,12 +33,12 @@
#ifdef __KERNEL__
#include <linux/pm_qos.h>
#include <linux/timer.h>
+#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
-#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
@@ -56,6 +56,7 @@
#include <linux/netdev_features.h>
struct netpoll_info;
+struct device;
struct phy_device;
/* 802.11 specific */
struct wireless_dev;
@@ -417,7 +418,7 @@ typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
extern void __napi_schedule(struct napi_struct *n);
-static inline int napi_disable_pending(struct napi_struct *n)
+static inline bool napi_disable_pending(struct napi_struct *n)
{
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
@@ -431,7 +432,7 @@ static inline int napi_disable_pending(struct napi_struct *n)
* ensure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
-static inline int napi_schedule_prep(struct napi_struct *n)
+static inline bool napi_schedule_prep(struct napi_struct *n)
{
return !napi_disable_pending(n) &&
!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
@@ -451,13 +452,13 @@ static inline void napi_schedule(struct napi_struct *n)
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline int napi_reschedule(struct napi_struct *napi)
+static inline bool napi_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__napi_schedule(napi);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
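[For context — a minimal sketch, not part of this patch, of how a driver typically uses these helpers: napi_schedule_prep() atomically claims NAPI_STATE_SCHED so at most one poll instance runs, then __napi_schedule() hands the poll to the softirq. The foo_* names are invented for illustration.]

/* Hypothetical driver fragment (kernel context only). */
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {
	struct napi_struct napi;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	/* false if already scheduled or a NAPI disable is pending */
	if (napi_schedule_prep(&priv->napi)) {
		/* a real driver would mask its RX interrupts here */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}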
/**
@@ -1082,7 +1083,8 @@ struct net_device {
const struct header_ops *header_ops;
unsigned int flags; /* interface flags (a la BSD) */
- unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
+ unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
+ * See if.h for definitions. */
unsigned short gflags;
unsigned short padded; /* How much padding added by alloc_netdev() */
@@ -1867,7 +1869,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
}
}
-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
}
@@ -1878,17 +1880,17 @@ static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
*
* Test if transmit queue on device is currently unable to send.
*/
-static inline int netif_queue_stopped(const struct net_device *dev)
+static inline bool netif_queue_stopped(const struct net_device *dev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
-static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_ANY_XOFF;
}
-static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}
@@ -1898,12 +1900,22 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
{
#ifdef CONFIG_BQL
dql_queued(&dev_queue->dql, bytes);
- if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
- set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
- if (unlikely(dql_avail(&dev_queue->dql) >= 0))
- clear_bit(__QUEUE_STATE_STACK_XOFF,
- &dev_queue->state);
- }
+
+ if (likely(dql_avail(&dev_queue->dql) >= 0))
+ return;
+
+ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+ /*
+ * The XOFF flag must be set before checking the dql_avail below,
+ * because in netdev_tx_completed_queue we update the dql_completed
+ * before checking the XOFF flag.
+ */
+ smp_mb();
+
+ /* check again in case another CPU has just made room avail */
+ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
#endif
}
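[A worked interleaving, simplified, showing why the re-check after set_bit() above is needed — without it, a completion running on another CPU between dql_queued() and set_bit() could leave the queue stopped forever:]

CPU A: netdev_tx_sent_queue()          CPU B: netdev_tx_completed_queue()
-----------------------------          ----------------------------------
dql_queued() -> avail < 0
                                       dql_completed() -> avail >= 0
                                       smp_mb()
                                       test_and_clear_bit(XOFF) fails
                                       (bit not set yet) -> no reschedule
set_bit(__QUEUE_STATE_STACK_XOFF)
smp_mb()
dql_avail() >= 0 -> clear_bit(),
queue keeps running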
@@ -1916,16 +1928,23 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned pkts, unsigned bytes)
{
#ifdef CONFIG_BQL
- if (likely(bytes)) {
- dql_completed(&dev_queue->dql, bytes);
- if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
- &dev_queue->state) &&
- dql_avail(&dev_queue->dql) >= 0)) {
- if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
- &dev_queue->state))
- netif_schedule_queue(dev_queue);
- }
- }
+ if (unlikely(!bytes))
+ return;
+
+ dql_completed(&dev_queue->dql, bytes);
+
+ /*
+	 * Without the memory barrier there is a small possibility that
+ * netdev_tx_sent_queue will miss the update and cause the queue to
+ * be stopped forever
+ */
+ smp_mb();
+
+ if (dql_avail(&dev_queue->dql) < 0)
+ return;
+
+ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+ netif_schedule_queue(dev_queue);
#endif
}
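[The two smp_mb() calls pair with each other: the sent side orders its XOFF store before re-reading dql_avail(), while the completed side orders its dql_completed() update before reading the XOFF bit, so at least one side always observes the other's update. A condensed view of the protocol, BQL internals elided:]

producer (ndo_start_xmit path)         consumer (TX completion path)
------------------------------         -----------------------------
dql_queued(dql, bytes)                 dql_completed(dql, bytes)
set_bit(STACK_XOFF)                    smp_mb()
smp_mb()                               if (dql_avail(dql) >= 0 &&
if (dql_avail(dql) >= 0)                   test_and_clear_bit(STACK_XOFF))
        clear_bit(STACK_XOFF)                  netif_schedule_queue(txq)

[This is presumably also why netdev_tx_reset_queue() below now clears __QUEUE_STATE_STACK_XOFF before dql_reset(): a stale XOFF bit surviving a reset would stall the queue until the next completion.]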
@@ -1938,6 +1957,7 @@ static inline void netdev_completed_queue(struct net_device *dev,
static inline void netdev_tx_reset_queue(struct netdev_queue *q)
{
#ifdef CONFIG_BQL
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
dql_reset(&q->dql);
#endif
}
@@ -1953,7 +1973,7 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
*
* Test if the device has been brought up.
*/
-static inline int netif_running(const struct net_device *dev)
+static inline bool netif_running(const struct net_device *dev)
{
return test_bit(__LINK_STATE_START, &dev->state);
}
@@ -2003,16 +2023,16 @@ static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
*
* Check individual transmit queue of a device with multiple transmit queues.
*/
-static inline int __netif_subqueue_stopped(const struct net_device *dev,
- u16 queue_index)
+static inline bool __netif_subqueue_stopped(const struct net_device *dev,
+ u16 queue_index)
{
struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
return netif_tx_queue_stopped(txq);
}
-static inline int netif_subqueue_stopped(const struct net_device *dev,
- struct sk_buff *skb)
+static inline bool netif_subqueue_stopped(const struct net_device *dev,
+ struct sk_buff *skb)
{
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
@@ -2051,7 +2071,7 @@ static inline u16 skb_tx_hash(const struct net_device *dev,
*
* Check if device has multiple transmit queues
*/
-static inline int netif_is_multiqueue(const struct net_device *dev)
+static inline bool netif_is_multiqueue(const struct net_device *dev)
{
return dev->num_tx_queues > 1;
}
@@ -2121,7 +2141,7 @@ extern int netdev_rx_handler_register(struct net_device *dev,
void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
-extern int dev_valid_name(const char *name);
+extern bool dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
@@ -2187,7 +2207,7 @@ extern void linkwatch_forget_dev(struct net_device *dev);
*
* Check if carrier is present on device
*/
-static inline int netif_carrier_ok(const struct net_device *dev)
+static inline bool netif_carrier_ok(const struct net_device *dev)
{
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
@@ -2239,7 +2259,7 @@ static inline void netif_dormant_off(struct net_device *dev)
*
* Check if the device is dormant
*/
-static inline int netif_dormant(const struct net_device *dev)
+static inline bool netif_dormant(const struct net_device *dev)
{
return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
@@ -2251,7 +2271,7 @@ static inline int netif_dormant(const struct net_device *dev)
*
* Check if carrier is operational
*/
-static inline int netif_oper_up(const struct net_device *dev)
+static inline bool netif_oper_up(const struct net_device *dev)
{
return (dev->operstate == IF_OPER_UP ||
dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
@@ -2263,7 +2283,7 @@ static inline int netif_oper_up(const struct net_device *dev)
*
* Check if device has not been removed from system.
*/
-static inline int netif_device_present(struct net_device *dev)
+static inline bool netif_device_present(struct net_device *dev)
{
return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
@@ -2333,9 +2353,9 @@ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
txq->xmit_lock_owner = smp_processor_id();
}
-static inline int __netif_tx_trylock(struct netdev_queue *txq)
+static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
- int ok = spin_trylock(&txq->_xmit_lock);
+ bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok))
txq->xmit_lock_owner = smp_processor_id();
return ok;
@@ -2556,6 +2576,8 @@ extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
@@ -2611,7 +2633,7 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
netdev_features_t netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(netdev_features_t features, int gso_type)
+static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2626,17 +2648,18 @@ static inline int net_gso_ok(netdev_features_t features, int gso_type)
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
+static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
-static inline int netif_needs_gso(struct sk_buff *skb,
- netdev_features_t features)
+static inline bool netif_needs_gso(struct sk_buff *skb,
+ netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
- unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+ unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
+ (skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
static inline void netif_set_gso_max_size(struct net_device *dev,
@@ -2645,11 +2668,16 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
-static inline int netif_is_bond_slave(struct net_device *dev)
+static inline bool netif_is_bond_slave(struct net_device *dev)
{
return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
+static inline bool netif_supports_nofcs(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_SUPP_NOFCS;
+}
+
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */
@@ -2687,14 +2715,14 @@ int netdev_info(const struct net_device *dev, const char *format, ...);
#define MODULE_ALIAS_NETDEV(device) \
MODULE_ALIAS("netdev-" device)
-#if defined(DEBUG)
-#define netdev_dbg(__dev, format, args...) \
- netdev_printk(KERN_DEBUG, __dev, format, ##args)
-#elif defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
#else
#define netdev_dbg(__dev, format, args...) \
({ \