Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/dcache.h      |  20
-rw-r--r--  include/linux/inetdevice.h  |  34
-rw-r--r--  include/linux/ipv6.h        |   1
-rw-r--r--  include/linux/lockref.h     |  71
-rw-r--r--  include/linux/mlx5/device.h |  22
-rw-r--r--  include/linux/mlx5/driver.h |   7
-rw-r--r--  include/linux/mm_types.h    |   1
-rw-r--r--  include/linux/nsproxy.h     |   6
-rw-r--r--  include/linux/regmap.h      |   1
-rw-r--r--  include/linux/sched.h       |   6
-rw-r--r--  include/linux/spinlock.h    |  14
-rw-r--r--  include/linux/swapops.h     |   2
-rw-r--r--  include/linux/syscalls.h    |   5
13 files changed, 126 insertions, 64 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c..efdc944 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
+#include <linux/lockref.h>
struct nameidata;
struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
# endif
#endif
+#define d_lock d_lockref.lock
+
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
/* Ref lookup also touches following */
- unsigned int d_count; /* protected by d_lock */
- spinlock_t d_lock; /* per dentry lock */
+ struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
assert_spin_locked(&dentry->d_lock);
if (!read_seqcount_retry(&dentry->d_seq, seq)) {
ret = 1;
- dentry->d_count++;
+ dentry->d_lockref.count++;
}
return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
static inline unsigned d_count(const struct dentry *dentry)
{
- return dentry->d_count;
+ return dentry->d_lockref.count;
}
/* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
if (dentry)
- dentry->d_count++;
+ dentry->d_lockref.count++;
return dentry;
}
static inline struct dentry *dget(struct dentry *dentry)
{
- if (dentry) {
- spin_lock(&dentry->d_lock);
- dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (dentry)
+ lockref_get(&dentry->d_lockref);
return dentry;
}
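
With d_count and d_lock folded into a single struct lockref, dget() above can take a reference through lockref_get() and, on architectures that implement lockref with a cmpxchg over the combined lock/count word, never touch the spinlock on the fast path. For contrast, a minimal sketch of what the matching release side could look like under the same scheme (example_dput() and its teardown comment are illustrative only; the real dput() lives in fs/dcache.c):

    #include <linux/dcache.h>

    static void example_dput(struct dentry *dentry)
    {
            if (!dentry)
                    return;
            /* Fast path: count was > 1, it was decremented, lock untouched. */
            if (lockref_put_or_lock(&dentry->d_lockref))
                    return;
            /* Slow path: count <= 1 and d_lock is now held; a real
             * implementation would do dentry_kill()-style teardown here. */
            spin_unlock(&dentry->d_lock);
    }
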
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23..79640e0 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
#include <linux/bitmap.h>
#include <linux/if.h>
+#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
-enum
-{
- IPV4_DEVCONF_FORWARDING=1,
- IPV4_DEVCONF_MC_FORWARDING,
- IPV4_DEVCONF_PROXY_ARP,
- IPV4_DEVCONF_ACCEPT_REDIRECTS,
- IPV4_DEVCONF_SECURE_REDIRECTS,
- IPV4_DEVCONF_SEND_REDIRECTS,
- IPV4_DEVCONF_SHARED_MEDIA,
- IPV4_DEVCONF_RP_FILTER,
- IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
- IPV4_DEVCONF_BOOTP_RELAY,
- IPV4_DEVCONF_LOG_MARTIANS,
- IPV4_DEVCONF_TAG,
- IPV4_DEVCONF_ARPFILTER,
- IPV4_DEVCONF_MEDIUM_ID,
- IPV4_DEVCONF_NOXFRM,
- IPV4_DEVCONF_NOPOLICY,
- IPV4_DEVCONF_FORCE_IGMP_VERSION,
- IPV4_DEVCONF_ARP_ANNOUNCE,
- IPV4_DEVCONF_ARP_IGNORE,
- IPV4_DEVCONF_PROMOTE_SECONDARIES,
- IPV4_DEVCONF_ARP_ACCEPT,
- IPV4_DEVCONF_ARP_NOTIFY,
- IPV4_DEVCONF_ACCEPT_LOCAL,
- IPV4_DEVCONF_SRC_VMARK,
- IPV4_DEVCONF_PROXY_ARP_PVLAN,
- IPV4_DEVCONF_ROUTE_LOCALNET,
- __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95b..b8b7dc7 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
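
IP6SKB_FRAGMENTED continues the power-of-two flag scheme above. A sketch of the assumed usage pattern (the helper names are illustrative; in the kernel the flag is set by the IPv6 reassembly code and tested later by consumers of the packet):

    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    /* Record in the skb control block that this packet was reassembled. */
    static void example_mark_fragmented(struct sk_buff *skb)
    {
            IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
    }

    /* Later code can then tell that the packet arrived as fragments. */
    static bool example_was_fragmented(struct sk_buff *skb)
    {
            return IP6CB(skb)->flags & IP6SKB_FRAGMENTED;
    }
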
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 0000000..01233e0
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+ spinlock_t lock;
+ unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+ int retval = 0;
+
+ spin_lock(&lockref->lock);
+ if (lockref->count) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count is <= 1
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+ spin_lock(&lockref->lock);
+ if (lockref->count <= 1)
+ return 0;
+ lockref->count--;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
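
The new header in one usage sketch (struct example_obj and the helpers are illustrative, not part of the patch): the point of lockref is that the count is always updated as if the embedded spinlock were held, so code that does take the lock can still trust the count.

    #include <linux/lockref.h>
    #include <linux/slab.h>

    struct example_obj {
            struct lockref ref;     /* lock also protects payload */
            int payload;
    };

    static struct example_obj *example_get(struct example_obj *obj)
    {
            lockref_get(&obj->ref); /* valid: caller already holds a ref */
            return obj;
    }

    static void example_put(struct example_obj *obj)
    {
            /* Common case: count was > 1 and has been decremented. */
            if (lockref_put_or_lock(&obj->ref))
                    return;
            /* Last reference: the spinlock is held, tear down under it. */
            obj->ref.count = 0;
            spin_unlock(&obj->ref.lock);
            kfree(obj);
    }
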
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 737685e..68029b3 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
- u8 rsvd22[4];
- __be16 max_qp_mcg;
- u8 rsvd23;
+ __be32 max_qp_mcg;
+ u8 rsvd22[3];
u8 log_max_mcg;
- u8 rsvd24;
+ u8 rsvd23;
u8 log_max_pd;
- u8 rsvd25;
+ u8 rsvd24;
u8 log_max_xrcd;
- u8 rsvd26[42];
+ u8 rsvd25[42];
__be16 log_uar_page_sz;
- u8 rsvd27[28];
+ u8 rsvd26[28];
u8 log_msx_atomic_size_qp;
- u8 rsvd28[2];
+ u8 rsvd27[2];
u8 log_msx_atomic_size_dc;
- u8 rsvd29[76];
+ u8 rsvd28[76];
};
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
- u8 rsvd1[2];
- __be16 num_pages;
- __be32 rsvd2[5];
+ __be32 num_pages;
+ __be32 rsvd1[5];
};
union ev_data {
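
The page-request event now carries a 32-bit page count in place of the old 16-bit field. A hedged decoding sketch (example_req_num_pages() is illustrative; the driver does the equivalent conversion in its event-queue handler, and the value is treated as signed on the assumption that negative counts mean the HCA is returning pages):

    #include <linux/kernel.h>
    #include <linux/mlx5/device.h>

    static s32 example_req_num_pages(const struct mlx5_eqe_page_req *req)
    {
            /* Negative values request reclaim rather than allocation. */
            return (s32)be32_to_cpu(req->num_pages);
    }
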
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2aa258b..8888381 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,7 +358,7 @@ struct mlx5_caps {
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
- u16 max_qp_mcg;
+ u32 max_qp_mcg;
int min_page_sz;
};
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages);
+ s32 npages);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa..faf4b7c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947..b4ec59d 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
* A structure to contain pointers to all per-process
* namespaces - fs (mount), uts, network, sysvipc, etc.
*
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns. The pid namespace here is the
+ * namespace that children will use.
+ *
* 'count' is the number of tasks holding a reference.
* The count for each namespace, then, will be the number
* of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
- struct pid_namespace *pid_ns;
+ struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
};
extern struct nsproxy init_nsproxy;
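
The comment above draws a distinction that is easy to get wrong: tsk->nsproxy->pid_ns_for_children is only what future children will get, not the namespace the task itself is in. A small illustrative check (example_children_in_same_pid_ns() is hypothetical):

    #include <linux/nsproxy.h>
    #include <linux/pid_namespace.h>
    #include <linux/sched.h>

    static bool example_children_in_same_pid_ns(struct task_struct *tsk)
    {
            /* Caller must keep tsk->nsproxy stable, e.g. tsk == current. */
            struct pid_namespace *own  = task_active_pid_ns(tsk);
            struct pid_namespace *kids = tsk->nsproxy->pid_ns_for_children;

            /* These diverge between unshare(CLONE_NEWPID) and the next fork(). */
            return own == kids;
    }
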
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 580a532..6d91fcb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/err.h>
+#include <linux/bug.h>
struct module;
struct device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d722490..078066d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1532,6 +1532,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(struct task_struct *p)
{
@@ -1543,6 +1545,8 @@ static inline int pid_alive(struct task_struct *p)
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
@@ -1894,6 +1898,8 @@ extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ce..75f3494 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * can not be reordered with a LOAD inside this section.
+ * spin_lock() is already a one-way barrier, so this LOAD can not
+ * escape out of the region. The default implementation therefore
+ * simply ensures that a STORE can not move into the critical section;
+ * smp_wmb() serializes it with the STORE that spin_lock() itself does.
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
#endif
/**
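
The rewritten comment describes exactly one ordering obligation, which a sketch makes concrete (modeled on the try_to_wake_up() usage; the identifiers are illustrative):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static int example_cond, example_state;

    static void example_store_then_lock(void)
    {
            example_cond = 1;          /* STORE before the critical section */
            smp_mb__before_spinlock(); /* order it against the LOAD below */
            spin_lock(&example_lock);
            if (example_state)         /* LOAD inside the critical section */
                    example_state = 0;
            spin_unlock(&example_lock);
    }
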
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d..8d4fa82 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
swp_entry_t arch_entry;
BUG_ON(pte_file(pte));
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d70..84662ec 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
int __user *);
#else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+ int __user *, int);
+#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, int);
#endif
+#endif
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
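
The nested #ifdef above is easier to follow with the argument orders written out. A reading aid, not part of the header (parameter names follow the SYSCALL_DEFINE variants in kernel/fork.c, which this header mirrors):

    /*
     * CONFIG_CLONE_BACKWARDS:  clone(flags, newsp, parent_tidptr, tls,
     *                                child_tidptr)
     * CONFIG_CLONE_BACKWARDS3: clone(flags, newsp, stack_size,
     *                                parent_tidptr, child_tidptr, tls)
     * default:                 clone(flags, newsp, parent_tidptr,
     *                                child_tidptr, tls)
     */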