Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/aio.h21
-rw-r--r--include/linux/anon_inodes.h3
-rw-r--r--include/linux/ceph/osd_client.h2
-rw-r--r--include/linux/cpu_rmap.h3
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/dcache.h14
-rw-r--r--include/linux/fs.h32
-rw-r--r--include/linux/fs_struct.h11
-rw-r--r--include/linux/hardirq.h4
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/huge_mm.h3
-rw-r--r--include/linux/interrupt.h75
-rw-r--r--include/linux/iommu.h16
-rw-r--r--include/linux/irq.h9
-rw-r--r--include/linux/irqdesc.h3
-rw-r--r--include/linux/irqnr.h19
-rw-r--r--include/linux/kernel_stat.h34
-rw-r--r--include/linux/kvm_host.h1
-rw-r--r--include/linux/list_lru.h131
-rw-r--r--include/linux/memcontrol.h148
-rw-r--r--include/linux/migrate.h3
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/percpu_ida.h60
-rw-r--r--include/linux/platform_data/exynos_thermal.h119
-rw-r--r--include/linux/platform_data/leds-lp55xx.h10
-rw-r--r--include/linux/platform_data/leds-pca963x.h (renamed from include/linux/platform_data/leds-pca9633.h)25
-rw-r--r--include/linux/res_counter.h2
-rw-r--r--include/linux/sched.h7
-rw-r--r--include/linux/seqlock.h68
-rw-r--r--include/linux/shrinker.h54
-rw-r--r--include/linux/slab.h156
-rw-r--r--include/linux/slab_def.h106
-rw-r--r--include/linux/slob_def.h31
-rw-r--r--include/linux/slub_def.h110
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/thermal.h18
-rw-r--r--include/linux/time-armada-370-xp.h16
-rw-r--r--include/linux/timex.h1
40 files changed, 671 insertions, 663 deletions
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 1bdf965..d9c92da 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -27,15 +27,13 @@ struct kiocb;
*/
#define KIOCB_CANCELLED ((void *) (~0ULL))
-typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *);
+typedef int (kiocb_cancel_fn)(struct kiocb *);
struct kiocb {
- atomic_t ki_users;
-
struct file *ki_filp;
struct kioctx *ki_ctx; /* NULL for sync ops */
kiocb_cancel_fn *ki_cancel;
- void (*ki_dtor)(struct kiocb *);
+ void *private;
union {
void __user *user;
@@ -44,17 +42,7 @@ struct kiocb {
__u64 ki_user_data; /* user's data for completion */
loff_t ki_pos;
-
- void *private;
- /* State that we remember to be able to restart/retry */
- unsigned short ki_opcode;
- size_t ki_nbytes; /* copy of iocb->aio_nbytes */
- char __user *ki_buf; /* remaining iocb->aio_buf */
- size_t ki_left; /* remaining bytes */
- struct iovec ki_inline_vec; /* inline vector */
- struct iovec *ki_iovec;
- unsigned long ki_nr_segs;
- unsigned long ki_cur_seg;
+ size_t ki_nbytes; /* copy of iocb->aio_nbytes */
struct list_head ki_list; /* the aio core uses this
* for cancellation */
@@ -74,7 +62,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
{
*kiocb = (struct kiocb) {
- .ki_users = ATOMIC_INIT(1),
.ki_ctx = NULL,
.ki_filp = filp,
.ki_obj.tsk = current,
@@ -84,7 +71,6 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
/* prototypes */
#ifdef CONFIG_AIO
extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
-extern void aio_put_req(struct kiocb *iocb);
extern void aio_complete(struct kiocb *iocb, long res, long res2);
struct mm_struct;
extern void exit_aio(struct mm_struct *mm);
@@ -93,7 +79,6 @@ extern long do_io_submit(aio_context_t ctx_id, long nr,
void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
#else
static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
-static inline void aio_put_req(struct kiocb *iocb) { }
static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
struct mm_struct;
static inline void exit_aio(struct mm_struct *mm) { }
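With the retry infrastructure gone, the cancel callback loses its struct io_event argument. A minimal sketch of a driver registering the new single-argument kiocb_cancel_fn (my_request, my_abort_hw_request and the use of iocb->private are assumptions, not part of this patch):

/* Hypothetical cancel callback matching the new kiocb_cancel_fn. */
static int my_cancel(struct kiocb *iocb)
{
	struct my_request *req = iocb->private;	/* assumed driver state */

	my_abort_hw_request(req);		/* assumed helper */
	return 0;
}

static void my_submit(struct kiocb *iocb, struct my_request *req)
{
	iocb->private = req;
	kiocb_set_cancel_fn(iocb, my_cancel);
	/* ... queue the request to hardware ... */
}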
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
index 8013a45..cf573c2 100644
--- a/include/linux/anon_inodes.h
+++ b/include/linux/anon_inodes.h
@@ -13,6 +13,9 @@ struct file_operations;
struct file *anon_inode_getfile(const char *name,
const struct file_operations *fops,
void *priv, int flags);
+struct file *anon_inode_getfile_private(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags);
int anon_inode_getfd(const char *name, const struct file_operations *fops,
void *priv, int flags);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index ce6df39..8f47625 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -335,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req);
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
struct ceph_file_layout *layout,
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index 1739510..bdd18ca 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -52,8 +52,6 @@ static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
return rmap->obj[rmap->near[cpu].index];
}
-#ifdef CONFIG_GENERIC_HARDIRQS
-
/**
* alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
* @size: Number of objects to be mapped
@@ -68,5 +66,4 @@ extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
-#endif
#endif /* __LINUX_CPU_RMAP_H */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index d568f39..fcabc42 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -85,7 +85,6 @@ struct cpufreq_policy {
struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
- int transition_ongoing; /* Tracks transition status */
};
/* Only for ACPI */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index feaa8d8..59066e0 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -55,11 +55,11 @@ struct qstr {
#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
struct dentry_stat_t {
- int nr_dentry;
- int nr_unused;
- int age_limit; /* age in seconds */
- int want_pages; /* pages requested by system */
- int dummy[2];
+ long nr_dentry;
+ long nr_unused;
+ long age_limit; /* age in seconds */
+ long want_pages; /* pages requested by system */
+ long dummy[2];
};
extern struct dentry_stat_t dentry_stat;
@@ -395,4 +395,8 @@ static inline bool d_mountpoint(const struct dentry *dentry)
extern int sysctl_vfs_cache_pressure;
+static inline unsigned long vfs_pressure_ratio(unsigned long val)
+{
+ return mult_frac(val, sysctl_vfs_cache_pressure, 100);
+}
#endif /* __LINUX_DCACHE_H */
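vfs_pressure_ratio() centralizes the sysctl_vfs_cache_pressure scaling that the VFS cache shrinkers apply to their object counts. A hedged sketch of a count-side caller, using the list_lru and split count/scan shrinker interfaces introduced later in this series (my_lru is an assumption):

static struct list_lru my_lru;

static unsigned long my_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	/* Scale the raw LRU count by sysctl_vfs_cache_pressure / 100. */
	return vfs_pressure_ratio(list_lru_count(&my_lru));
}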
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 529d871..3f40547 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -10,6 +10,7 @@
#include <linux/stat.h>
#include <linux/cache.h>
#include <linux/list.h>
+#include <linux/list_lru.h>
#include <linux/llist.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
@@ -1269,15 +1270,6 @@ struct super_block {
struct list_head s_files;
#endif
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
- /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
- struct list_head s_dentry_lru; /* unused dentry lru */
- int s_nr_dentry_unused; /* # of dentry on lru */
-
- /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
- spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp;
- struct list_head s_inode_lru; /* unused inode lru */
- int s_nr_inodes_unused; /* # of inodes on lru */
-
struct block_device *s_bdev;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
@@ -1331,11 +1323,14 @@ struct super_block {
/* AIO completions deferred from interrupt context */
struct workqueue_struct *s_dio_done_wq;
-};
-/* superblock cache pruning functions */
-extern void prune_icache_sb(struct super_block *sb, int nr_to_scan);
-extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan);
+ /*
+ * Keep the lru lists last in the structure so they always sit on their
+ * own individual cachelines.
+ */
+ struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
+ struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
+};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1629,8 +1624,8 @@ struct super_operations {
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
#endif
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
- int (*nr_cached_objects)(struct super_block *);
- void (*free_cached_objects)(struct super_block *, int);
+ long (*nr_cached_objects)(struct super_block *, int);
+ long (*free_cached_objects)(struct super_block *, long, int);
};
/*
@@ -2074,6 +2069,7 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
+extern int sb_is_blkdev_sb(struct super_block *sb);
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -2093,6 +2089,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
{
}
+
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+ return 0;
+}
#endif
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
@@ -2494,7 +2495,6 @@ extern const struct file_operations generic_ro_fops;
#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
-extern int vfs_follow_link(struct nameidata *, const char *);
extern int page_readlink(struct dentry *, char __user *, int);
extern void *page_follow_link_light(struct dentry *, struct nameidata *);
extern void page_put_link(struct dentry *, struct nameidata *, void *);
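The super_operations cache hooks now return long counts and take an extra int argument, which in this series appears to be the NUMA node id being scanned. A sketch of updated filesystem hooks (the myfs_* helpers and myfs_info are assumptions):

static long myfs_nr_cached_objects(struct super_block *sb, int nid)
{
	struct myfs_info *fsi = sb->s_fs_info;	/* assumed fs-private data */

	return myfs_count_reclaimable(fsi, nid);	/* assumed helper */
}

static long myfs_free_cached_objects(struct super_block *sb, long nr_to_scan,
				     int nid)
{
	struct myfs_info *fsi = sb->s_fs_info;

	return myfs_reclaim(fsi, nr_to_scan, nid);	/* assumed helper */
}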
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 2b93a9a..0efc3e6 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -39,17 +39,6 @@ static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
spin_unlock(&fs->lock);
}
-static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
- struct path *pwd)
-{
- spin_lock(&fs->lock);
- *root = fs->root;
- path_get(root);
- *pwd = fs->pwd;
- path_get(pwd);
- spin_unlock(&fs->lock);
-}
-
extern bool current_chrooted(void);
#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index ccfe17c..1e04106 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -7,11 +7,7 @@
#include <linux/vtime.h>
-#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
-#else
-# define synchronize_irq(irq) barrier()
-#endif
#if defined(CONFIG_TINY_RCU)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index ee1ffc5..31b9d29 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -756,6 +756,10 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts);
int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b60de92..3935428 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -96,9 +96,6 @@ extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd,
struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
-extern int handle_pte_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- pte_t *pte, pmd_t *pmd, unsigned int flags);
extern int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5fa5afe..5e865b5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -120,7 +120,6 @@ struct irqaction {
extern irqreturn_t no_action(int cpl, void *dev_id);
-#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
@@ -140,40 +139,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
-#else
-
-extern int __must_check
-request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
- const char *name, void *dev);
-
-/*
- * Special function to avoid ifdeffery in kernel/irq/devres.c which
- * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
- * m68k). I really love these $@%#!* obvious Makefile references:
- * ../../../kernel/irq/devres.o
- */
-static inline int __must_check
-request_threaded_irq(unsigned int irq, irq_handler_t handler,
- irq_handler_t thread_fn,
- unsigned long flags, const char *name, void *dev)
-{
- return request_irq(irq, handler, flags, name, dev);
-}
-
-static inline int __must_check
-request_any_context_irq(unsigned int irq, irq_handler_t handler,
- unsigned long flags, const char *name, void *dev_id)
-{
- return request_irq(irq, handler, flags, name, dev_id);
-}
-
-static inline int __must_check
-request_percpu_irq(unsigned int irq, irq_handler_t handler,
- const char *devname, void __percpu *percpu_dev_id)
-{
- return request_irq(irq, handler, 0, devname, percpu_dev_id);
-}
-#endif
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
@@ -221,7 +186,6 @@ extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
/* The following three functions are for the core kernel use only. */
-#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
@@ -229,13 +193,8 @@ extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
-#else
-static inline void suspend_device_irqs(void) { };
-static inline void resume_device_irqs(void) { };
-static inline int check_wakeup_irqs(void) { return 0; }
-#endif
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
@@ -287,9 +246,8 @@ static inline int irq_set_affinity_hint(unsigned int irq,
{
return -EINVAL;
}
-#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
+#endif /* CONFIG_SMP */
-#ifdef CONFIG_GENERIC_HARDIRQS
/*
* Special lockdep variants of irq disabling/enabling.
* These should be used for locking constructs that
@@ -354,33 +312,6 @@ static inline int disable_irq_wake(unsigned int irq)
return irq_set_irq_wake(irq, 0);
}
-#else /* !CONFIG_GENERIC_HARDIRQS */
-/*
- * NOTE: non-genirq architectures, if they want to support the lock
- * validator need to define the methods below in their asm/irq.h
- * files, under an #ifdef CONFIG_LOCKDEP section.
- */
-#ifndef CONFIG_LOCKDEP
-# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
-# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
- disable_irq_nosync(irq)
-# define disable_irq_lockdep(irq) disable_irq(irq)
-# define enable_irq_lockdep(irq) enable_irq(irq)
-# define enable_irq_lockdep_irqrestore(irq, flags) \
- enable_irq(irq)
-# endif
-
-static inline int enable_irq_wake(unsigned int irq)
-{
- return 0;
-}
-
-static inline int disable_irq_wake(unsigned int irq)
-{
- return 0;
-}
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
@@ -655,7 +586,7 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
* if more than one irq occurred.
*/
-#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
+#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
return 0;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3aeb730..7ea319e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -58,10 +58,26 @@ struct iommu_domain {
#define IOMMU_CAP_CACHE_COHERENCY 0x1
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
+/*
+ * The following constraints are specific to FSL_PAMUV1:
+ * -aperture must be power of 2, and naturally aligned
+ * -number of windows must be power of 2, and address space size
+ * of each window is determined by aperture size / # of windows
+ * -the actual size of the mapped region of a window must be power
+ * of 2 starting with 4KB and physical address must be naturally
+ * aligned.
+ * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
+ * The caller can invoke iommu_domain_get_attr to check if the underlying
+ * iommu implementation supports these constraints.
+ */
+
enum iommu_attr {
DOMAIN_ATTR_GEOMETRY,
DOMAIN_ATTR_PAGING,
DOMAIN_ATTR_WINDOWS,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f04d3ba..56bb0dc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -382,8 +382,6 @@ extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
-#ifdef CONFIG_GENERIC_HARDIRQS
-
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
void irq_move_irq(struct irq_data *data);
void irq_move_masked_irq(struct irq_data *data);
@@ -802,11 +800,4 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
-#else /* !CONFIG_GENERIC_HARDIRQS */
-
-extern struct msi_desc *irq_get_msi_desc(unsigned int irq);
-extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
-
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 623325e..56fb646 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -76,8 +76,6 @@ struct irq_desc {
extern struct irq_desc irq_desc[NR_IRQS];
#endif
-#ifdef CONFIG_GENERIC_HARDIRQS
-
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
return &desc->irq_data;
@@ -173,6 +171,5 @@ __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
desc->preflow_handler = handler;
}
#endif
-#endif
#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 0a2dc46..fdd5cc1 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -4,23 +4,6 @@
#include <uapi/linux/irqnr.h>
-#ifndef CONFIG_GENERIC_HARDIRQS
-#include <asm/irq.h>
-
-/*
- * Wrappers for non-genirq architectures:
- */
-#define nr_irqs NR_IRQS
-#define irq_to_desc(irq) (&irq_desc[irq])
-
-# define for_each_irq_desc(irq, desc) \
- for (irq = 0; irq < nr_irqs; irq++)
-
-# define for_each_irq_desc_reverse(irq, desc) \
- for (irq = nr_irqs - 1; irq >= 0; irq--)
-
-#else /* CONFIG_GENERIC_HARDIRQS */
-
extern int nr_irqs;
extern struct irq_desc *irq_to_desc(unsigned int irq);
unsigned int irq_get_next_irq(unsigned int offset);
@@ -50,8 +33,6 @@ unsigned int irq_get_next_irq(unsigned int offset);
for (irq = irq_get_next_irq(0); irq < nr_irqs; \
irq = irq_get_next_irq(irq + 1))
-#endif /* CONFIG_GENERIC_HARDIRQS */
-
#define for_each_irq_nr(irq) \
for (irq = 0; irq < nr_irqs; irq++)
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ed5f6ed..51c72be 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -36,9 +36,6 @@ struct kernel_cpustat {
};
struct kernel_stat {
-#ifndef CONFIG_GENERIC_HARDIRQS
- unsigned int irqs[NR_IRQS];
-#endif
unsigned long irqs_sum;
unsigned int softirqs[NR_SOFTIRQS];
};
@@ -54,22 +51,6 @@ DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
extern unsigned long long nr_context_switches(void);
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-struct irq_desc;
-
-static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
- struct irq_desc *desc)
-{
- __this_cpu_inc(kstat.irqs[irq]);
- __this_cpu_inc(kstat.irqs_sum);
-}
-
-static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
- return kstat_cpu(cpu).irqs[irq];
-}
-#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
@@ -79,8 +60,6 @@ do { \
__this_cpu_inc(kstat.irqs_sum); \
} while (0)
-#endif
-
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
__this_cpu_inc(kstat.softirqs[irq]);
@@ -94,20 +73,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
/*
* Number of interrupts per specific IRQ source, since bootup
*/
-#ifndef CONFIG_GENERIC_HARDIRQS
-static inline unsigned int kstat_irqs(unsigned int irq)
-{
- unsigned int sum = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- sum += kstat_irqs_cpu(irq, cpu);
-
- return sum;
-}
-#else
extern unsigned int kstat_irqs(unsigned int irq);
-#endif
/*
* Number of interrupts per cpu, since bootup
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ca645a0..0fbbc7a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,6 +533,7 @@ int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
new file mode 100644
index 0000000..3ce5417
--- /dev/null
+++ b/include/linux/list_lru.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
+ * Authors: David Chinner and Glauber Costa
+ *
+ * Generic LRU infrastructure
+ */
+#ifndef _LRU_LIST_H
+#define _LRU_LIST_H
+
+#include <linux/list.h>
+#include <linux/nodemask.h>
+
+/* list_lru_walk_cb must always return one of these values */
+enum lru_status {
+ LRU_REMOVED, /* item removed from list */
+ LRU_ROTATE, /* item referenced, give another pass */
+ LRU_SKIP, /* item cannot be locked, skip */
+ LRU_RETRY, /* item not freeable. May drop the lock
+ internally, but has to return locked. */
+};
+
+struct list_lru_node {
+ spinlock_t lock;
+ struct list_head list;
+ /* kept as signed so we can catch imbalance bugs */
+ long nr_items;
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+ struct list_lru_node *node;
+ nodemask_t active_nodes;
+};
+
+void list_lru_destroy(struct list_lru *lru);
+int list_lru_init(struct list_lru *lru);
+
+/**
+ * list_lru_add: add an element to the lru list's tail
+ * @list_lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * If the element is already part of a list, this function returns without
+ * doing anything. Therefore the caller does not need to keep state about
+ * whether or not the element already belongs in the list and is allowed to
+ * update it lazily. Note, however, that this is valid for *a* list, not
+ * *this* list. If the caller organizes itself in a way that elements can be
+ * in more than one type of list, it is up to the caller to fully remove the
+ * item from the previous list (with list_lru_del() for instance) before
+ * moving it to @list_lru.
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_del: delete an element from the lru list
+ * @list_lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function works analogously to list_lru_add() in terms of list
+ * manipulation. The comments about an element already belonging to
+ * a list are also valid for list_lru_del.
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_del(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_count_node: return the number of objects currently held by @lru
+ * @lru: the lru pointer.
+ * @nid: the node id to count from.
+ *
+ * Always returns a non-negative number, 0 for empty lists. There is no
+ * guarantee that the list is not updated while the count is being computed.
+ * Callers that want such a guarantee need to provide an outer lock.
+ */
+unsigned long list_lru_count_node(struct list_lru *lru, int nid);
+static inline unsigned long list_lru_count(struct list_lru *lru)
+{
+ long count = 0;
+ int nid;
+
+ for_each_node_mask(nid, lru->active_nodes)
+ count += list_lru_count_node(lru, nid);
+
+ return count;
+}
+
+typedef enum lru_status
+(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg);
+/**
+ * list_lru_walk_node: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @isolate: callback function that is responsible for deciding what to do with
+ * the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * This function will scan all elements in a particular list_lru, calling the
+ * @isolate callback for each of those items, along with the current list
+ * spinlock and a caller-provided opaque argument. The @isolate callback can choose to
+ * drop the lock internally, but *must* return with the lock held. The callback
+ * will return an enum lru_status telling the list_lru infrastructure what to
+ * do with the object being scanned.
+ *
+ * Please note that nr_to_walk does not mean how many objects will be freed,
+ * just how many objects will be scanned.
+ *
+ * Return value: the number of objects effectively removed from the LRU.
+ */
+unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
+
+static inline unsigned long
+list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+ void *cb_arg, unsigned long nr_to_walk)
+{
+ long isolated = 0;
+ int nid;
+
+ for_each_node_mask(nid, lru->active_nodes) {
+ isolated += list_lru_walk_node(lru, nid, isolate,
+ cb_arg, &nr_to_walk);
+ if (nr_to_walk <= 0)
+ break;
+ }
+ return isolated;
+}
+#endif /* _LRU_LIST_H */
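A minimal usage sketch of the API documented above, assuming a hypothetical my_object that embeds its LRU linkage; per the walk contract, isolated items are collected under the lock and freed outside it:

struct my_object {
	struct list_head lru;
	/* ... payload ... */
};

static enum lru_status my_isolate(struct list_head *item,
				  spinlock_t *lock, void *cb_arg)
{
	struct list_head *dispose = cb_arg;

	/* Called with @lock held; move the item out for later freeing. */
	list_move(item, dispose);
	return LRU_REMOVED;
}

static void my_prune(struct list_lru *lru, unsigned long nr)
{
	LIST_HEAD(dispose);
	struct my_object *obj, *next;

	list_lru_walk(lru, my_isolate, &dispose, nr);

	/* Free the isolated objects with no lru lock held. */
	list_for_each_entry_safe(obj, next, &dispose, lru)
		kfree(obj);
}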
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c41609..60e9587 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -30,9 +30,21 @@ struct page;
struct mm_struct;
struct kmem_cache;
-/* Stats that can be updated by kernel. */
-enum mem_cgroup_page_stat_item {
- MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+/*
+ * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c.
+ * These two lists should be kept in accord with each other.
+ */
+enum mem_cgroup_stat_index {
+ /*
+ * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
+ */
+ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
+ MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
+ MEM_CGROUP_STAT_NSTATS,
};
struct mem_cgroup_reclaim_cookie {
@@ -41,6 +53,23 @@ struct mem_cgroup_reclaim_cookie {
unsigned int generation;
};
+enum mem_cgroup_filter_t {
+ VISIT, /* visit current node */
+ SKIP, /* skip the current node and continue traversal */
+ SKIP_TREE, /* skip the whole subtree and continue traversal */
+};
+
+/*
+ * mem_cgroup_filter_t predicate might instruct mem_cgroup_iter_cond how to
+ * iterate through the hierarchy tree. Each tree element is checked by the
+ * predicate before it is returned by the iterator. If a filter returns
+ * SKIP or SKIP_TREE then the iterator code continues traversal (with the
+ * next node down the hierarchy or the next node that doesn't belong under the
+ * memcg's subtree).
+ */
+typedef enum mem_cgroup_filter_t
+(*mem_cgroup_iter_filter)(struct mem_cgroup *memcg, struct mem_cgroup *root);
+
#ifdef CONFIG_MEMCG
/*
* All "charge" functions with gfp_mask should use GFP_KERNEL or
@@ -108,9 +137,18 @@ mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok);
-struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
- struct mem_cgroup *,
- struct mem_cgroup_reclaim_cookie *);
+struct mem_cgroup *mem_cgroup_iter_cond(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond);
+
+static inline struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim)
+{
+ return mem_cgroup_iter_cond(root, prev, reclaim, NULL);
+}
+
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/*
@@ -125,6 +163,48 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
struct page *newpage);
+/**
+ * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
+ * @new: true to enable, false to disable
+ *
+ * Toggle whether a failed memcg charge should invoke the OOM killer
+ * or just return -ENOMEM. Returns the previous toggle state.
+ *
+ * NOTE: Any path that enables the OOM killer before charging must
+ * call mem_cgroup_oom_synchronize() afterward to finalize the
+ * OOM handling and clean up.
+ */
+static inline bool mem_cgroup_toggle_oom(bool new)
+{
+ bool old;
+
+ old = current->memcg_oom.may_oom;
+ current->memcg_oom.may_oom = new;
+
+ return old;
+}
+
+static inline void mem_cgroup_enable_oom(void)
+{
+ bool old = mem_cgroup_toggle_oom(true);
+
+ WARN_ON(old == true);
+}
+
+static inline void mem_cgroup_disable_oom(void)
+{
+ bool old = mem_cgroup_toggle_oom(false);
+
+ WARN_ON(old == false);
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_oom.in_memcg_oom;
+}
+
+bool mem_cgroup_oom_synchronize(void);
+
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif
@@ -165,24 +245,24 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
}
void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx,
+ enum mem_cgroup_stat_index idx,
int val);
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(page, idx, 1);
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
mem_cgroup_update_page_stat(page, idx, -1);
}
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned);
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+ struct mem_cgroup *root);
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
@@ -296,6 +376,15 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
struct page *oldpage, struct page *newpage, bool migration_ok)
{
}
+static inline struct mem_cgroup *
+mem_cgroup_iter_cond(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim,
+ mem_cgroup_iter_filter cond)
+{
+ /* first call must return non-NULL, second return NULL */
+ return (struct mem_cgroup *)(unsigned long)!prev;
+}
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
@@ -348,22 +437,45 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
{
}
+static inline bool mem_cgroup_toggle_oom(bool new)
+{
+ return false;
+}
+
+static inline void mem_cgroup_enable_oom(void)
+{
+}
+
+static inline void mem_cgroup_disable_oom(void)
+{
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(void)
+{
+ return false;
+}
+
static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
}
static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_page_stat_item idx)
+ enum mem_cgroup_stat_index idx)
{
}
static inline
-unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
- gfp_t gfp_mask,
- unsigned long *total_scanned)
+enum mem_cgroup_filter_t
+mem_cgroup_soft_reclaim_eligible(struct mem_cgroup *memcg,
+ struct mem_cgroup *root)
{
- return 0;
+ return VISIT;
}
static inline void mem_cgroup_split_huge_fixup(struct page *head)
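Since mem_cgroup_soft_reclaim_eligible() already matches the mem_cgroup_iter_filter signature, a hierarchy walk can pass it straight to mem_cgroup_iter_cond(). A sketch with the actual reclaim step elided:

static void my_soft_reclaim(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg = NULL;

	for (;;) {
		memcg = mem_cgroup_iter_cond(root, memcg, NULL,
					mem_cgroup_soft_reclaim_eligible);
		if (!memcg)
			break;
		/* ... reclaim pages charged to @memcg ... */
	}
}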
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 6fe5214..8d3c57f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -53,6 +53,9 @@ extern int migrate_vmas(struct mm_struct *mm,
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
+extern int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ struct buffer_head *head, enum migrate_mode mode);
#else
static inline void putback_lru_pages(struct list_head *l) {}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index caf543c..8b6e55e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -176,6 +176,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED 0x40 /* second try */
+#define FAULT_FLAG_USER 0x80 /* The fault originated in userspace */
/*
* vm_fault is filled by the the pagefault handler and passed to the vma's
@@ -876,11 +877,12 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
+#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_HWPOISON_LARGE)
+ VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -984,7 +986,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
unmap_mapping_range(mapping, holebegin, holelen, 0);
}
-extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
+extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
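truncate_pagecache() no longer takes the old size; callers pass only the new size. A minimal sketch of a setsize-style caller (this is essentially what truncate_setsize() does):

static void my_setsize(struct inode *inode, loff_t newsize)
{
	/* Publish the new size, then drop the now-stale pagecache. */
	i_size_write(inode, newsize);
	truncate_pagecache(inode, newsize);
}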
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c..d9851ee 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -322,6 +322,7 @@ struct mm_rss_stat {
atomic_long_t count[NR_MM_COUNTERS];
};
+struct kioctx_table;
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
@@ -383,8 +384,8 @@ struct mm_struct {
struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct hlist_head ioctx_list;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MM_OWNER
/*
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index bc95b2b..97fbecd 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -758,6 +758,7 @@
#define PCI_DEVICE_ID_HP_CISSE 0x323a
#define PCI_DEVICE_ID_HP_CISSF 0x323b
#define PCI_DEVICE_ID_HP_CISSH 0x323c
+#define PCI_DEVICE_ID_HP_CISSI 0x3239
#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
#define PCI_VENDOR_ID_PCTECH 0x1042
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
new file mode 100644
index 0000000..0b23edb
--- /dev/null
+++ b/include/linux/percpu_ida.h
@@ -0,0 +1,60 @@
+#ifndef __PERCPU_IDA_H__
+#define __PERCPU_IDA_H__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+struct percpu_ida_cpu;
+
+struct percpu_ida {
+ /*
+ * number of tags available to be allocated, as passed to
+ * percpu_ida_init()
+ */
+ unsigned nr_tags;
+
+ struct percpu_ida_cpu __percpu *tag_cpu;
+
+ /*
+ * Bitmap of cpus that (may) have tags on their percpu freelists:
+ * steal_tags() uses this to decide when to steal tags, and which cpus
+ * to try stealing from.
+ *
+ * It's ok for a freelist to be empty when its bit is set - steal_tags()
+ * will just keep looking - but the bitmap _must_ be set whenever a
+ * percpu freelist does have tags.
+ */
+ cpumask_t cpus_have_tags;
+
+ struct {
+ spinlock_t lock;
+ /*
+ * When we go to steal tags from another cpu (see steal_tags()),
+ * we want to pick a cpu at random. Cycling through them every
+ * time we steal is a bit easier and more or less equivalent:
+ */
+ unsigned cpu_last_stolen;
+
+ /* For sleeping on allocation failure */
+ wait_queue_head_t wait;
+
+ /*
+ * Global freelist - it's a stack where nr_free points to the
+ * top
+ */
+ unsigned nr_free;
+ unsigned *freelist;
+ } ____cacheline_aligned_in_smp;
+};
+
+int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp);
+void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
+
+void percpu_ida_destroy(struct percpu_ida *pool);
+int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags);
+
+#endif /* __PERCPU_IDA_H__ */
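A short sketch of the tag-pool API above; the pool size and the idea of indexing a preallocated request array by tag are assumptions:

static struct percpu_ida my_pool;

static int my_init(void)
{
	return percpu_ida_init(&my_pool, 128);	/* tags 0..127 */
}

static void my_io(void)
{
	int tag = percpu_ida_alloc(&my_pool, GFP_KERNEL);

	if (tag < 0)
		return;		/* no tag could be allocated */
	/* ... use @tag to index a preallocated request slot ... */
	percpu_ida_free(&my_pool, tag);
}

static void my_exit(void)
{
	percpu_ida_destroy(&my_pool);
}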
diff --git a/include/linux/platform_data/exynos_thermal.h b/include/linux/platform_data/exynos_thermal.h
deleted file mode 100644
index da7e627..0000000
--- a/include/linux/platform_data/exynos_thermal.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * exynos_thermal.h - Samsung EXYNOS TMU (Thermal Management Unit)
- *
- * Copyright (C) 2011 Samsung Electronics
- * Donggeun Kim <dg77.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _LINUX_EXYNOS_THERMAL_H
-#define _LINUX_EXYNOS_THERMAL_H
-#include <linux/cpu_cooling.h>
-
-enum calibration_type {
- TYPE_ONE_POINT_TRIMMING,
- TYPE_TWO_POINT_TRIMMING,
- TYPE_NONE,
-};
-
-enum soc_type {
- SOC_ARCH_EXYNOS4210 = 1,
- SOC_ARCH_EXYNOS,
-};
-/**
- * struct freq_clip_table
- * @freq_clip_max: maximum frequency allowed for this cooling state.
- * @temp_level: Temperature level at which the temperature clipping will
- * happen.
- * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
- *
- * This structure is required to be filled and passed to the
- * cpufreq_cooling_unregister function.
- */
-struct freq_clip_table {
- unsigned int freq_clip_max;
- unsigned int temp_level;
- const struct cpumask *mask_val;
-};
-
-/**
- * struct exynos_tmu_platform_data
- * @threshold: basic temperature for generating interrupt
- * 25 <= threshold <= 125 [unit: degree Celsius]
- * @threshold_falling: differntial value for setting threshold
- * of temperature falling interrupt.
- * @trigger_levels: array for each interrupt levels
- * [unit: degree Celsius]
- * 0: temperature for trigger_level0 interrupt
- * condition for trigger_level0 interrupt:
- * current temperature > threshold + trigger_levels[0]
- * 1: temperature for trigger_level1 interrupt
- * condition for trigger_level1 interrupt:
- * current temperature > threshold + trigger_levels[1]
- * 2: temperature for trigger_level2 interrupt
- * condition for trigger_level2 interrupt:
- * current temperature > threshold + trigger_levels[2]
- * 3: temperature for trigger_level3 interrupt
- * condition for trigger_level3 interrupt:
- * current temperature > threshold + trigger_levels[3]
- * @trigger_level0_en:
- * 1 = enable trigger_level0 interrupt,
- * 0 = disable trigger_level0 interrupt
- * @trigger_level1_en:
- * 1 = enable trigger_level1 interrupt,
- * 0 = disable trigger_level1 interrupt
- * @trigger_level2_en:
- * 1 = enable trigger_level2 interrupt,
- * 0 = disable trigger_level2 interrupt
- * @trigger_level3_en:
- * 1 = enable trigger_level3 interrupt,
- * 0 = disable trigger_level3 interrupt
- * @gain: gain of amplifier in the positive-TC generator block
- * 0 <= gain <= 15
- * @reference_voltage: reference voltage of amplifier
- * in the positive-TC generator block
- * 0 <= reference_voltage <= 31
- * @noise_cancel_mode: noise cancellation mode
- * 000, 100, 101, 110 and 111 can be different modes
- * @type: determines the type of SOC
- * @efuse_value: platform defined fuse value
- * @cal_type: calibration type for temperature
- * @freq_clip_table: Table representing frequency reduction percentage.
- * @freq_tab_count: Count of the above table as frequency reduction may
- * applicable to only some of the trigger levels.
- *
- * This structure is required for configuration of exynos_tmu driver.
- */
-struct exynos_tmu_platform_data {
- u8 threshold;
- u8 threshold_falling;
- u8 trigger_levels[4];
- bool trigger_level0_en;
- bool trigger_level1_en;
- bool trigger_level2_en;
- bool trigger_level3_en;
-
- u8 gain;
- u8 reference_voltage;
- u8 noise_cancel_mode;
- u32 efuse_value;
-
- enum calibration_type cal_type;
- enum soc_type type;
- struct freq_clip_table freq_tab[4];
- unsigned int freq_tab_count;
-};
-#endif /* _LINUX_EXYNOS_THERMAL_H */
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
index 202e290..51a2ff5 100644
--- a/include/linux/platform_data/leds-lp55xx.h
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -36,6 +36,13 @@ struct lp55xx_predef_pattern {
u8 size_b;
};
+enum lp8501_pwr_sel {
+ LP8501_ALL_VDD, /* D1~9 are connected to VDD */
+ LP8501_6VDD_3VOUT, /* D1~6 with VDD, D7~9 with VOUT */
+ LP8501_3VDD_6VOUT, /* D1~6 with VOUT, D7~9 with VDD */
+ LP8501_ALL_VOUT, /* D1~9 are connected to VOUT */
+};
+
/*
* struct lp55xx_platform_data
* @led_config : Configurable led class device
@@ -67,6 +74,9 @@ struct lp55xx_platform_data {
/* Predefined pattern data */
struct lp55xx_predef_pattern *patterns;
unsigned int num_patterns;
+
+ /* LP8501 specific */
+ enum lp8501_pwr_sel pwr_sel;
};
#endif /* _LEDS_LP55XX_H */
diff --git a/include/linux/platform_data/leds-pca9633.h b/include/linux/platform_data/leds-pca963x.h
index c5bf29b..e731f00 100644
--- a/include/linux/platform_data/leds-pca9633.h
+++ b/include/linux/platform_data/leds-pca963x.h
@@ -1,7 +1,8 @@
/*
- * PCA9633 LED chip driver.
+ * PCA963X LED chip driver.
*
* Copyright 2012 bct electronic GmbH
+ * Copyright 2013 Qtechnology A/S
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,18 +19,24 @@
* 02110-1301 USA
*/
-#ifndef __LINUX_PCA9633_H
-#define __LINUX_PCA9633_H
+#ifndef __LINUX_PCA963X_H
+#define __LINUX_PCA963X_H
#include <linux/leds.h>
-enum pca9633_outdrv {
- PCA9633_OPEN_DRAIN,
- PCA9633_TOTEM_POLE, /* aka push-pull */
+enum pca963x_outdrv {
+ PCA963X_OPEN_DRAIN,
+ PCA963X_TOTEM_POLE, /* aka push-pull */
};
-struct pca9633_platform_data {
+enum pca963x_blink_type {
+ PCA963X_SW_BLINK,
+ PCA963X_HW_BLINK,
+};
+
+struct pca963x_platform_data {
struct led_platform_data leds;
- enum pca9633_outdrv outdrv;
+ enum pca963x_outdrv outdrv;
+ enum pca963x_blink_type blink_type;
};
-#endif /* __LINUX_PCA9633_H*/
+#endif /* __LINUX_PCA963X_H*/
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 96a509b..201a697 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -54,7 +54,7 @@ struct res_counter {
struct res_counter *parent;
};
-#define RESOURCE_MAX (unsigned long long)LLONG_MAX
+#define RES_COUNTER_MAX ULLONG_MAX
/**
* Helpers to interact with userspace
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45f254d..6682da3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1393,6 +1393,13 @@ struct task_struct {
unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
} memcg_batch;
unsigned int memcg_kmem_skip_account;
+ struct memcg_oom_info {
+ unsigned int may_oom:1;
+ unsigned int in_memcg_oom:1;
+ unsigned int oom_locked:1;
+ int wakeups;
+ struct mem_cgroup *wait_on_memcg;
+ } memcg_oom;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 1829905..21a2093 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -3,15 +3,21 @@
/*
* Reader/writer consistent mechanism without starving writers. This type of
* lock for data where the reader wants a consistent set of information
- * and is willing to retry if the information changes. Readers never
- * block but they may have to retry if a writer is in
- * progress. Writers do not wait for readers.
+ * and is willing to retry if the information changes. There are two types
+ * of readers:
+ * 1. Sequence readers which never block a writer but they may have to retry
+ * if a writer is in progress by detecting change in sequence number.
+ * Writers do not wait for a sequence reader.
+ * 2. Locking readers which will wait if a writer or another locking reader
+ * is in progress. A locking reader in progress will also block a writer
+ * from going forward. Unlike the regular rwlock, the read lock here is
+ * exclusive so that only one locking reader can get it.
*
- * This is not as cache friendly as brlock. Also, this will not work
+ * This is not as cache friendly as brlock. Also, this may not work well
* for data that contains pointers, because any writer could
* invalidate a pointer that a reader was following.
*
- * Expected reader usage:
+ * Expected non-blocking reader usage:
* do {
* seq = read_seqbegin(&foo);
* ...
@@ -268,4 +274,56 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
spin_unlock_irqrestore(&sl->lock, flags);
}
+/*
+ * A locking reader exclusively locks out other writers and locking readers,
+ * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
+ * There is no need for preempt_disable() because spin_lock() already does it.
+ */
+static inline void read_seqlock_excl(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+}
+
+static inline void read_sequnlock_excl(seqlock_t *sl)
+{
+ spin_unlock(&sl->lock);
+}
+
+static inline void read_seqlock_excl_bh(seqlock_t *sl)
+{
+ spin_lock_bh(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+{
+ spin_unlock_bh(&sl->lock);
+}
+
+static inline void read_seqlock_excl_irq(seqlock_t *sl)
+{
+ spin_lock_irq(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+{
+ spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+ return flags;
+}
+
+#define read_seqlock_excl_irqsave(lock, flags) \
+ do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+
+static inline void
+read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+ spin_unlock_irqrestore(&sl->lock, flags);
+}
+
#endif /* __LINUX_SEQLOCK_H */
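The header now distinguishes the classic sequence readers from the new exclusive locking readers. A sketch of both styles over a hypothetical two-word value:

static DEFINE_SEQLOCK(my_lock);
static u64 my_sec, my_nsec;

/* Sequence reader: never blocks the writer, retries on collision. */
static u64 my_read_fast(void)
{
	unsigned seq;
	u64 s, ns;

	do {
		seq = read_seqbegin(&my_lock);
		s = my_sec;
		ns = my_nsec;
	} while (read_seqretry(&my_lock, seq));

	return s * NSEC_PER_SEC + ns;
}

/* Locking reader: excludes writers and other locking readers. */
static u64 my_read_consistent(void)
{
	u64 ret;

	read_seqlock_excl(&my_lock);
	ret = my_sec * NSEC_PER_SEC + my_nsec;
	read_sequnlock_excl(&my_lock);

	return ret;
}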
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index ac6b8ee..68c0970 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,39 +4,67 @@
/*
* This struct is used to pass information from page reclaim to the shrinkers.
* We consolidate the values for easier extention later.
+ *
+ * The 'gfp_mask' refers to the allocation we are currently trying to
+ * fulfil.
*/
struct shrink_control {
gfp_t gfp_mask;
- /* How many slab objects shrinker() should scan and try to reclaim */
+ /*
+ * How many objects scan_objects should scan and try to reclaim.
+ * This is reset before every call, so it is safe for callees
+ * to modify.
+ */
unsigned long nr_to_scan;
+
+ /* shrink from these nodes */
+ nodemask_t nodes_to_scan;
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
};
+#define SHRINK_STOP (~0UL)
/*
* A callback you can register to apply pressure to ageable caches.
*
- * 'sc' is passed shrink_control which includes a count 'nr_to_scan'
- * and a 'gfpmask'. It should look through the least-recently-used
- * 'nr_to_scan' entries and attempt to free them up. It should return
- * the number of objects which remain in the cache. If it returns -1, it means
- * it cannot do any scanning at this time (eg. there is a risk of deadlock).
+ * @count_objects should return the number of freeable items in the cache. If
+ * there are no objects to free or the number of freeable items cannot be
+ * determined, it should return 0. No deadlock checks should be done during the
+ * count callback - the shrinker infrastructure aggregates the scan counts that
+ * could not be executed due to potential deadlocks and runs them on a later
+ * call, once the deadlock condition is no longer pending.
*
- * The 'gfpmask' refers to the allocation we are currently trying to
- * fulfil.
+ * @scan_objects will only be called if @count_objects returned a non-zero
+ * value for the number of freeable objects. The callout should scan the cache
+ * and attempt to free items from the cache. It should then return the number
+ * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
+ * due to potential deadlocks. If SHRINK_STOP is returned, then no further
+ * attempts to call @scan_objects will be made from the current reclaim
+ * context.
*
- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
- * querying the cache size, so a fastpath for that case is appropriate.
+ * @flags determines the shrinker's abilities, like NUMA awareness
*/
struct shrinker {
- int (*shrink)(struct shrinker *, struct shrink_control *sc);
+ unsigned long (*count_objects)(struct shrinker *,
+ struct shrink_control *sc);
+ unsigned long (*scan_objects)(struct shrinker *,
+ struct shrink_control *sc);
+
int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
+ unsigned long flags;
/* These are for internal use */
struct list_head list;
- atomic_long_t nr_in_batch; /* objs pending delete */
+ /* objs pending delete, per node */
+ atomic_long_t *nr_deferred;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-extern void register_shrinker(struct shrinker *);
+
+/* Flags */
+#define SHRINKER_NUMA_AWARE (1 << 0)
+
+extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
#endif
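A sketch of a NUMA-aware shrinker on the split count/scan interface, reusing the hypothetical my_lru and my_isolate from the list_lru sketch earlier; note that register_shrinker() now returns int, so callers should check it:

/* my_lru and my_isolate as in the list_lru sketch above (assumptions) */

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count_node(&my_lru, sc->nid);
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;	/* cannot recurse into the fs here */

	freed = list_lru_walk_node(&my_lru, sc->nid, my_isolate,
				   &dispose, &sc->nr_to_scan);
	/* ... free everything collected on @dispose ... */
	return freed;
}

static struct shrinker my_shrinker = {
	.count_objects	= my_count,
	.scan_objects	= my_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE,
};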
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0e..74f1058 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
*/
#ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
+#include <linux/kmemleak.h>
struct mem_cgroup;
/*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+ return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
+{
+ return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
#include <linux/slub_def.h>
#endif
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+ void *ret;
+
+ flags |= (__GFP_COMP | __GFP_KMEMCG);
+ ret = (void *) __get_free_pages(flags, order);
+ kmemleak_alloc(ret, size, 1, flags);
+ return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+ if (!(flags & GFP_DMA)) {
+ int index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_trace(kmalloc_caches[index],
+ flags, size);
+ }
#endif
+ }
+ return __kmalloc(size, flags);
+}
/*
* Determine size used for the nth kmalloc cache.
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
return 0;
}
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+ size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+ int i = kmalloc_index(size);
+
+ if (!i)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+ flags, node, size);
+ }
+#endif
+ return __kmalloc_node(size, flags, node);
+}
+
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
- gfp_t flags, int node)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
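With the per-allocator headers trimmed, the kmalloc() fast path above is defined once for SLAB, SLUB and SLOB: a compile-time-constant size is mapped to a kmalloc cache through kmalloc_index(), and anything above KMALLOC_MAX_CACHE_SIZE goes through kmalloc_large(). A minimal caller sketch, with names and sizes invented for illustration (not part of this patch):

	struct foo {
		spinlock_t lock;
		char buf[48];
	};

	static struct foo *foo_alloc(void)
	{
		/* Constant size: resolved at compile time to a kmalloc cache. */
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

		/* Under SLUB, KMALLOC_MAX_CACHE_SIZE is only two pages, so a
		 * constant 64 KiB request compiles straight to
		 * kmalloc_large() -> __get_free_pages(). */
		void *big = kmalloc(64 * 1024, GFP_KERNEL);

		kfree(big);	/* kfree() handles both kinds of allocation */
		return f;
	}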
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd40158..e9346b4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
/*
* Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
*/
struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
*/
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- struct kmem_cache *cachep;
- void *ret;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- ret = kmem_cache_alloc_trace(cachep, flags, size);
-
- return ret;
- }
- return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size)
-{
- return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- struct kmem_cache *cachep;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
#endif /* _LINUX_SLAB_DEF_H */
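With slab_def.h reduced to the struct kmem_cache layout, the allocation entry points it used to duplicate are declared once in <linux/slab.h>, so a dedicated cache reads the same under any allocator. A hedged sketch, with the cache and type names invented for illustration:

	struct widget {
		int id;
	};

	static struct kmem_cache *widget_cachep;

	static int __init widget_cache_init(void)
	{
		widget_cachep = kmem_cache_create("widget",
						  sizeof(struct widget), 0,
						  SLAB_HWCACHE_ALIGN, NULL);
		return widget_cachep ? 0 : -ENOMEM;
	}

	static struct widget *widget_alloc(gfp_t flags)
	{
		/* kmem_cache_alloc() is now declared in <linux/slab.h>. */
		return kmem_cache_alloc(widget_cachep, flags);
	}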
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4..0000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
- gfp_t flags)
-{
- return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
- return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
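The deleted header spells out the pattern the common code now expresses once: under SLOB every allocation funnels into the node-aware entry point with NUMA_NO_NODE. As a sketch of that equivalence (illustrative, not from the patch):

	static void slob_equivalence(void)
	{
		/* On SLOB both calls take the same __kmalloc_node() path. */
		void *p = kmalloc(64, GFP_KERNEL);
		void *q = kmalloc_node(64, GFP_KERNEL, NUMA_NO_NODE);

		kfree(p);
		kfree(q);
	}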
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276f..cc0b67e 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <linux/kmemleak.h>
-
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
@@ -104,108 +98,4 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
- void *ret;
-
- flags |= (__GFP_COMP | __GFP_KMEMCG);
- ret = (void *) __get_free_pages(flags, order);
- kmemleak_alloc(ret, size, 1, flags);
- return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
- return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-
- if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(kmalloc_caches[index],
- flags, size);
- }
- }
- return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_node_trace(kmalloc_caches[index],
- flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-#endif
-
#endif /* _LINUX_SLUB_DEF_H */
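One subtlety the shared fast path preserves from the deleted SLUB version: a GFP_DMA request skips the compile-time kmalloc_caches[] lookup and always falls back to __kmalloc()/__kmalloc_node(), which can select a DMA-capable cache at run time. A hedged sketch (the helper name is illustrative):

	static void *dma_buf_alloc(size_t len)
	{
		/* GFP_DMA bypasses the constant-size cache lookup above. */
		return kmalloc(len, GFP_KERNEL | GFP_DMA);
	}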
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c03c139..46ba0c6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -280,7 +280,7 @@ extern void activate_page(struct page *);
extern void mark_page_accessed(struct page *);
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
-extern int lru_add_drain_all(void);
+extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void swap_setup(void);
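Since lru_add_drain_all() can no longer fail, callers that propagated its return value should drop the check. A sketch of the caller-side change (the surrounding context is illustrative):

	/* Before: */
	err = lru_add_drain_all();
	if (err)
		return err;

	/* After: the call always succeeds. */
	lru_add_drain_all();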
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index a386a1c..b268d3c 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -207,6 +207,16 @@ struct thermal_bind_params {
* See Documentation/thermal/sysfs-api.txt for more information.
*/
int trip_mask;
+
+	/*
+	 * An array of cooling state limits. It must contain exactly
+	 * 2 * thermal_zone.number_of_trip_points entries, laid out as
+	 * <lower-state upper-state> tuples of state limits. Each trip
+	 * point is associated with one tuple when binding. A NULL
+	 * pointer means <THERMAL_NO_LIMIT THERMAL_NO_LIMIT> on all trips.
+	 */
+ unsigned long *binding_limits;
int (*match) (struct thermal_zone_device *tz,
struct thermal_cooling_device *cdev);
};
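For a zone with two trip points, binding_limits carries 2 * 2 = 4 entries. A hedged example initializer; the names and values are illustrative, and THERMAL_NO_LIMIT is the existing in-tree macro the comment refers to:

	static unsigned long board_binding_limits[] = {
		0, 1,					/* trip 0: states 0..1 */
		THERMAL_NO_LIMIT, THERMAL_NO_LIMIT,	/* trip 1: unconstrained */
	};

	static struct thermal_bind_params board_tbp = {
		.trip_mask	= 0x3,			/* bind both trips */
		.binding_limits	= board_binding_limits,
	};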
@@ -214,6 +224,14 @@ struct thermal_bind_params {
/* Structure to define Thermal Zone parameters */
struct thermal_zone_params {
char governor_name[THERMAL_NAME_LENGTH];
+
+	/*
+	 * A boolean indicating whether the thermal-to-hwmon sysfs interface
+	 * is required. When no_hwmon == false, a hwmon sysfs interface
+	 * will be created; when no_hwmon == true, nothing will be done.
+	 */
+ bool no_hwmon;
+
int num_tbps; /* Number of tbp entries */
struct thermal_bind_params *tbp;
};
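A hedged example of a platform opting out of the hwmon interface while keeping a governor; the values are illustrative, and "step_wise" is an existing in-tree governor name:

	static struct thermal_zone_params board_tz_params = {
		.governor_name	= "step_wise",
		.no_hwmon	= true,	/* skip the thermal->hwmon sysfs bridge */
	};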
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h
deleted file mode 100644
index 6fb0856..0000000
--- a/include/linux/time-armada-370-xp.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Marvell Armada 370/XP SoC timer handling.
- *
- * Copyright (C) 2012 Marvell
- *
- * Lior Amsalem <alior@marvell.com>
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- */
-#ifndef __TIME_ARMADA_370_XPPRCMU_H
-#define __TIME_ARMADA_370_XPPRCMU_H
-
-void armada_370_xp_timer_init(void);
-
-#endif
diff --git a/include/linux/timex.h b/include/linux/timex.h
index b3726e6..dd3edd7 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -141,6 +141,7 @@ extern int do_adjtimex(struct timex *);
extern void hardpps(const struct timespec *, const struct timespec *);
int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
/* The clock frequency of the i8253/i8254 PIT */
#define PIT_TICK_RATE 1193182ul
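ntp_notify_cmos_timer() gives callers an explicit way to (re)schedule the deferred CMOS/RTC synchronization after a clock adjustment. A hedged sketch of a possible call site; the wrapper and its placement are assumptions for illustration, not taken from this hunk:

	static int adjust_and_resync(struct timex *txc)
	{
		int ret = do_adjtimex(txc);	/* apply the adjustment */

		/* Kick the deferred CMOS/RTC sync outside any timekeeping locks. */
		ntp_notify_cmos_timer();
		return ret;
	}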