author    | Jiri Kosina <jkosina@suse.cz> | 2013-09-04 08:49:39 (GMT)
committer | Jiri Kosina <jkosina@suse.cz> | 2013-09-04 08:49:57 (GMT)
commit    | efd15f5f4ff63f6ac5d80850686e3d2cc8c4481b (patch)
tree      | 40024adbe77a3d660662e639fd765097133d648c /include/linux
parent    | 6c2794a2984f4c17a58117a68703cc7640f01c5a (diff)
parent    | 58c59bc997d86593f0bea41845885917cf304d22 (diff)
download  | linux-fsl-qoriq-efd15f5f4ff63f6ac5d80850686e3d2cc8c4481b.tar.xz
Merge branch 'master' into for-3.12/upstream
Sync with Linus' tree to be able to apply fixup patch on top
of 9d9a04ee75 ("HID: apple: Add support for the 2013 Macbook Air")
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'include/linux')
165 files changed, 4660 insertions, 1113 deletions
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 9069694..a899402 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -44,10 +44,14 @@ struct alarm { void alarm_init(struct alarm *alarm, enum alarmtimer_type type, enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); int alarm_start(struct alarm *alarm, ktime_t start); +int alarm_start_relative(struct alarm *alarm, ktime_t start); +void alarm_restart(struct alarm *alarm); int alarm_try_to_cancel(struct alarm *alarm); int alarm_cancel(struct alarm *alarm); u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval); +u64 alarm_forward_now(struct alarm *alarm, ktime_t interval); +ktime_t alarm_expires_remaining(const struct alarm *alarm); /* Provide way to access the rtc device being used by alarmtimers */ struct rtc_device *alarmtimer_get_rtcdev(void); diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 2a5f64a..10fe2a2 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h @@ -76,11 +76,11 @@ struct pl08x_channel_data { * platform, all inclusive, including multiplexed channels. The available * physical channels will be multiplexed around these signals as they are * requested, just enumerate all possible channels. - * @get_signal: request a physical signal to be used for a DMA transfer + * @get_xfer_signal: request a physical signal to be used for a DMA transfer * immediately: if there is some multiplexing or similar blocking the use * of the channel the transfer can be denied by returning less than zero, * else it returns the allocated signal number - * @put_signal: indicate to the platform that this physical signal is not + * @put_xfer_signal: indicate to the platform that this physical signal is not * running any DMA transfer and multiplexing can be recycled * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2 * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2 @@ -89,8 +89,8 @@ struct pl08x_platform_data { const struct pl08x_channel_data *slave_channels; unsigned int num_slave_channels; struct pl08x_channel_data memcpy_channel; - int (*get_signal)(const struct pl08x_channel_data *); - void (*put_signal)(const struct pl08x_channel_data *, int); + int (*get_xfer_signal)(const struct pl08x_channel_data *); + void (*put_xfer_signal)(const struct pl08x_channel_data *, int); u8 lli_buses; u8 mem_buses; }; diff --git a/include/linux/audit.h b/include/linux/audit.h index b20b038..729a4d1 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -103,8 +103,11 @@ extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); extern void audit_putname(struct filename *name); + +#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ +#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ extern void __audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int parent); + unsigned int flags); extern void __audit_inode_child(const struct inode *parent, const struct dentry *dentry, const unsigned char type); @@ -148,10 +151,22 @@ static inline void audit_getname(struct filename *name) if (unlikely(!audit_dummy_context())) __audit_getname(name); } -static inline void audit_inode(struct filename *name, const struct dentry *dentry, +static inline void audit_inode(struct filename *name, + const struct dentry *dentry, unsigned int 
parent) { + if (unlikely(!audit_dummy_context())) { + unsigned int flags = 0; + if (parent) + flags |= AUDIT_INODE_PARENT; + __audit_inode(name, dentry, flags); + } +} +static inline void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ if (unlikely(!audit_dummy_context())) - __audit_inode(name, dentry, parent); + __audit_inode(name, dentry, + AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN); } static inline void audit_inode_child(const struct inode *parent, const struct dentry *dentry, @@ -311,7 +326,7 @@ static inline void audit_putname(struct filename *name) { } static inline void __audit_inode(struct filename *name, const struct dentry *dentry, - unsigned int parent) + unsigned int flags) { } static inline void __audit_inode_child(const struct inode *parent, const struct dentry *dentry, @@ -321,6 +336,9 @@ static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int parent) { } +static inline void audit_inode_parent_hidden(struct filename *name, + const struct dentry *dentry) +{ } static inline void audit_inode_child(const struct inode *parent, const struct dentry *dentry, const unsigned char type) diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 2e34db8..622fc50 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -144,6 +144,7 @@ struct bcma_host_ops { /* Chip IDs of PCIe devices */ #define BCMA_CHIP_ID_BCM4313 0x4313 +#define BCMA_CHIP_ID_BCM43142 43142 #define BCMA_CHIP_ID_BCM43224 43224 #define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8 #define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index b8b09ea..c49e1a1 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -330,6 +330,8 @@ #define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */ #define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */ #define BCMA_CC_PMU_STAT 0x0608 /* PMU status */ +#define BCMA_CC_PMU_STAT_EXT_LPO_AVAIL 0x00000100 +#define BCMA_CC_PMU_STAT_WDRESET 0x00000080 #define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */ #define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? 
*/ #define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */ @@ -355,6 +357,11 @@ #define BCMA_CC_REGCTL_DATA 0x065C #define BCMA_CC_PLLCTL_ADDR 0x0660 #define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ +#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ +#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_MASK 0x80000000 +#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT 31 #define BCMA_CC_SPROM 0x0800 /* SPROM beginning */ /* NAND flash MLC controller registers (corerev >= 38) */ #define BCMA_CC_NAND_REVISION 0x0C00 @@ -435,6 +442,23 @@ #define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 #define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0 +/* PMU rev 15 */ +#define BCMA_CC_PMU15_PLL_PLLCTL0 0 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 +#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_SHIFT 0 +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC +#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT 2 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 +#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_SHIFT 22 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 +#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_SHIFT 24 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 +#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 +#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_SHIFT 30 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 +#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 + /* ALP clock on pre-PMU chips */ #define BCMA_CC_PMU_ALP_CLOCK 20000000 /* HT clock for systems with PMU-enabled chipcommon */ @@ -507,6 +531,37 @@ #define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE BIT(18) #define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE BIT(19) +#define BCMA_RES_4314_LPLDO_PU BIT(0) +#define BCMA_RES_4314_PMU_SLEEP_DIS BIT(1) +#define BCMA_RES_4314_PMU_BG_PU BIT(2) +#define BCMA_RES_4314_CBUCK_LPOM_PU BIT(3) +#define BCMA_RES_4314_CBUCK_PFM_PU BIT(4) +#define BCMA_RES_4314_CLDO_PU BIT(5) +#define BCMA_RES_4314_LPLDO2_LVM BIT(6) +#define BCMA_RES_4314_WL_PMU_PU BIT(7) +#define BCMA_RES_4314_LNLDO_PU BIT(8) +#define BCMA_RES_4314_LDO3P3_PU BIT(9) +#define BCMA_RES_4314_OTP_PU BIT(10) +#define BCMA_RES_4314_XTAL_PU BIT(11) +#define BCMA_RES_4314_WL_PWRSW_PU BIT(12) +#define BCMA_RES_4314_LQ_AVAIL BIT(13) +#define BCMA_RES_4314_LOGIC_RET BIT(14) +#define BCMA_RES_4314_MEM_SLEEP BIT(15) +#define BCMA_RES_4314_MACPHY_RET BIT(16) +#define BCMA_RES_4314_WL_CORE_READY BIT(17) +#define BCMA_RES_4314_ILP_REQ BIT(18) +#define BCMA_RES_4314_ALP_AVAIL BIT(19) +#define BCMA_RES_4314_MISC_PWRSW_PU BIT(20) +#define BCMA_RES_4314_SYNTH_PWRSW_PU BIT(21) +#define BCMA_RES_4314_RX_PWRSW_PU BIT(22) +#define BCMA_RES_4314_RADIO_PU BIT(23) +#define BCMA_RES_4314_VCO_LDO_PU BIT(24) +#define BCMA_RES_4314_AFE_LDO_PU BIT(25) +#define BCMA_RES_4314_RX_LDO_PU BIT(26) +#define BCMA_RES_4314_TX_LDO_PU BIT(27) +#define BCMA_RES_4314_HT_AVAIL BIT(28) +#define BCMA_RES_4314_MACPHY_CLK_AVAIL BIT(29) + /* Data for the PMU, if available. 
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) */ diff --git a/include/linux/bio.h b/include/linux/bio.h index ef24466..ec48bac 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -97,11 +97,11 @@ static inline void *bio_data(struct bio *bio) * permanent PIO fall back, user is probably better off disabling highmem * I/O completely on that queue (see ide-dma for example) */ -#define __bio_kmap_atomic(bio, idx, kmtype) \ +#define __bio_kmap_atomic(bio, idx) \ (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \ bio_iovec_idx((bio), (idx))->bv_offset) -#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr) +#define __bio_kunmap_atomic(addr) kunmap_atomic(addr) /* * merge helpers etc diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h deleted file mode 100644 index 72b713a..0000000 --- a/include/linux/can/platform/flexcan.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (C) 2010 Marc Kleine-Budde <kernel@pengutronix.de> - * - * This file is released under the GPLv2 - * - */ - -#ifndef __CAN_PLATFORM_FLEXCAN_H -#define __CAN_PLATFORM_FLEXCAN_H - -/** - * struct flexcan_platform_data - flex CAN controller platform data - * @transceiver_enable: - called to power on/off the transceiver - * - */ -struct flexcan_platform_data { - void (*transceiver_switch)(int enable); -}; - -#endif /* __CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 379f715..0442c3d 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -160,11 +160,6 @@ static inline void ceph_decode_timespec(struct timespec *ts, static inline void ceph_encode_timespec(struct ceph_timespec *tv, const struct timespec *ts) { - BUG_ON(ts->tv_sec < 0); - BUG_ON(ts->tv_sec > (__kernel_time_t)U32_MAX); - BUG_ON(ts->tv_nsec < 0); - BUG_ON(ts->tv_nsec > (long)U32_MAX); - tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); } diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 186db0b..ce6df39 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -145,7 +145,6 @@ struct ceph_osd_request { s32 r_reply_op_result[CEPH_OSD_MAX_OP]; int r_got_reply; int r_linger; - int r_completed; struct ceph_osd_client *r_osdc; struct kref r_kref; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index fd097ec..e9ac882 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -278,6 +278,8 @@ enum { * * - memcg: use_hierarchy is on by default and the cgroup file for * the flag is not created. + * + * - blkcg: blk-throttle becomes properly hierarchical. */ CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), @@ -540,8 +542,7 @@ int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); -int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id, - char *buf, size_t buflen); +int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); int cgroup_task_count(const struct cgroup *cgrp); diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 6e7ec64..b613ffd 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -1,86 +1,55 @@ -/* Add subsystem definitions of the form SUBSYS(<name>) in this - * file. 
Surround each one by a line of comment markers so that - * patches don't collide +/* + * List of cgroup subsystems. + * + * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. */ - -/* */ - -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CPUSETS) SUBSYS(cpuset) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG) SUBSYS(debug) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED) SUBSYS(cpu_cgroup) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT) SUBSYS(cpuacct) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_MEMCG) SUBSYS(mem_cgroup) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE) SUBSYS(devices) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER) SUBSYS(freezer) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP) SUBSYS(net_cls) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP) SUBSYS(blkio) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF) SUBSYS(perf) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP) SUBSYS(net_prio) #endif -/* */ - #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB) SUBSYS(hugetlb) #endif - -/* */ - -#ifdef CONFIG_CGROUP_BCACHE -SUBSYS(bcache) -#endif - -/* */ +/* + * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS. + */ diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h index 963d714..0857922 100644 --- a/include/linux/clockchips.h +++ b/include/linux/clockchips.h @@ -30,6 +30,7 @@ enum clock_event_nofitiers { #include <linux/notifier.h> struct clock_event_device; +struct module; /* Clock event mode commands */ enum clock_event_mode { @@ -83,6 +84,7 @@ enum clock_event_mode { * @irq: IRQ number (only for non CPU local devices) * @cpumask: cpumask to indicate for which CPUs this device works * @list: list head for the management code + * @owner: module reference */ struct clock_event_device { void (*event_handler)(struct clock_event_device *); @@ -112,6 +114,7 @@ struct clock_event_device { int irq; const struct cpumask *cpumask; struct list_head list; + struct module *owner; } ____cacheline_aligned; /* @@ -138,6 +141,7 @@ static inline unsigned long div_sc(unsigned long ticks, unsigned long nsec, extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt); extern void clockevents_register_device(struct clock_event_device *dev); +extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu); extern void clockevents_config(struct clock_event_device *dev, u32 freq); extern void clockevents_config_and_register(struct clock_event_device *dev, @@ -150,7 +154,6 @@ extern void clockevents_exchange_device(struct clock_event_device *old, struct clock_event_device *new); extern void clockevents_set_mode(struct clock_event_device *dev, enum clock_event_mode mode); -extern int clockevents_register_notifier(struct notifier_block *nb); extern int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, bool force); diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 7279b94..dbbf8aa 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -21,6 +21,7 @@ /* clocksource cycle base type */ typedef u64 cycle_t; struct clocksource; +struct module; #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA #include <asm/clocksource.h> @@ -162,6 +163,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary * @cycle_last: most recent cycle counter value seen by 
::read() + * @owner: module reference, must be set by clocksource in modules */ struct clocksource { /* @@ -195,6 +197,7 @@ struct clocksource { cycle_t cs_last; cycle_t wd_last; #endif + struct module *owner; } ____cacheline_aligned; /* @@ -207,6 +210,7 @@ struct clocksource { #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 #define CLOCK_SOURCE_UNSTABLE 0x40 #define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80 +#define CLOCK_SOURCE_RESELECT 0x100 /* simplify initialization of mask field */ #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) @@ -279,7 +283,7 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift) extern int clocksource_register(struct clocksource*); -extern void clocksource_unregister(struct clocksource*); +extern int clocksource_unregister(struct clocksource*); extern void clocksource_touch_watchdog(void); extern struct clocksource* clocksource_get_next(void); extern void clocksource_change_rating(struct clocksource *cs, int rating); @@ -321,7 +325,7 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz) } -extern void timekeeping_notify(struct clocksource *clock); +extern int timekeeping_notify(struct clocksource *clock); extern cycle_t clocksource_mmio_readl_up(struct clocksource *); extern cycle_t clocksource_mmio_readl_down(struct clocksource *); diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 9f3c7e8..ab0eade 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -6,9 +6,8 @@ * definitions of processors. * * Basic handling of the devices is done in drivers/base/cpu.c - * and system devices are handled in drivers/base/sys.c. * - * CPUs are exported via sysfs in the class/cpu/devices/ + * CPUs are exported via sysfs in the devices/system/cpu * directory. */ #ifndef _LINUX_CPU_H_ @@ -115,7 +114,7 @@ enum { /* Need to know about CPUs going up/down? 
*/ #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) #define cpu_notifier(fn, pri) { \ - static struct notifier_block fn##_nb __cpuinitdata = \ + static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = pri }; \ register_cpu_notifier(&fn##_nb); \ } diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index 282e270..a5d52ee 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -41,7 +41,7 @@ cpufreq_cooling_register(const struct cpumask *clip_cpus); */ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev); -unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int); +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq); #else /* !CONFIG_CPU_THERMAL */ static inline struct thermal_cooling_device * cpufreq_cooling_register(const struct cpumask *clip_cpus) @@ -54,7 +54,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) return; } static inline -unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int) +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) { return THERMAL_CSTATE_INVALID; } diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 4d7390b..90d5a15 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -119,7 +119,7 @@ struct cpufreq_policy { struct kobject kobj; struct completion kobj_unregister; - bool transition_ongoing; /* Tracks transition status */ + int transition_ongoing; /* Tracks transition status */ }; #define CPUFREQ_ADJUST (0) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f42dbe1..b90337c 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -324,6 +324,11 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) return ret; } +static inline unsigned d_count(const struct dentry *dentry) +{ + return dentry->d_count; +} + /* validate "insecure" dentry pointer */ extern int d_validate(struct dentry *, struct dentry *); diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h new file mode 100644 index 0000000..d5b68bf --- /dev/null +++ b/include/linux/decompress/unlz4.h @@ -0,0 +1,10 @@ +#ifndef DECOMPRESS_UNLZ4_H +#define DECOMPRESS_UNLZ4_H + +int unlz4(unsigned char *inbuf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *pos, + void(*error)(char *x)); +#endif diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 3cd3247..e151d4c 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -446,9 +446,9 @@ int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); /* * Table reference counting. 
*/ -struct dm_table *dm_get_live_table(struct mapped_device *md); -void dm_table_get(struct dm_table *t); -void dm_table_put(struct dm_table *t); +struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx); +void dm_put_live_table(struct mapped_device *md, int srcu_idx); +void dm_sync_table(struct mapped_device *md); /* * Queries diff --git a/include/linux/device.h b/include/linux/device.h index bcf8c0d..22b546a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -47,7 +47,11 @@ struct bus_attribute { }; #define BUS_ATTR(_name, _mode, _show, _store) \ -struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) + struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define BUS_ATTR_RW(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) +#define BUS_ATTR_RO(_name) \ + struct bus_attribute bus_attr_##_name = __ATTR_RO(_name) extern int __must_check bus_create_file(struct bus_type *, struct bus_attribute *); @@ -261,9 +265,12 @@ struct driver_attribute { size_t count); }; -#define DRIVER_ATTR(_name, _mode, _show, _store) \ -struct driver_attribute driver_attr_##_name = \ - __ATTR(_name, _mode, _show, _store) +#define DRIVER_ATTR(_name, _mode, _show, _store) \ + struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DRIVER_ATTR_RW(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RW(_name) +#define DRIVER_ATTR_RO(_name) \ + struct driver_attribute driver_attr_##_name = __ATTR_RO(_name) extern int __must_check driver_create_file(struct device_driver *driver, const struct driver_attribute *attr); @@ -313,6 +320,7 @@ int subsys_virtual_register(struct bus_type *subsys, * @name: Name of the class. * @owner: The module owner. * @class_attrs: Default attributes of this class. + * @dev_groups: Default attributes of the devices that belong to the class. * @dev_attrs: Default attributes of the devices belong to the class. * @dev_bin_attrs: Default binary attributes of the devices belong to the class. * @dev_kobj: The kobject that represents this class and links it into the hierarchy. 
@@ -342,7 +350,8 @@ struct class { struct module *owner; struct class_attribute *class_attrs; - struct device_attribute *dev_attrs; + struct device_attribute *dev_attrs; /* use dev_groups instead */ + const struct attribute_group **dev_groups; struct bin_attribute *dev_bin_attrs; struct kobject *dev_kobj; @@ -414,8 +423,12 @@ struct class_attribute { const struct class_attribute *attr); }; -#define CLASS_ATTR(_name, _mode, _show, _store) \ -struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define CLASS_ATTR(_name, _mode, _show, _store) \ + struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define CLASS_ATTR_RW(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RW(_name) +#define CLASS_ATTR_RO(_name) \ + struct class_attribute class_attr_##_name = __ATTR_RO(_name) extern int __must_check class_create_file(struct class *class, const struct class_attribute *attr); @@ -423,7 +436,6 @@ extern void class_remove_file(struct class *class, const struct class_attribute *attr); /* Simple class attribute that is just a static string */ - struct class_attribute_string { struct class_attribute attr; char *str; @@ -512,6 +524,10 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, #define DEVICE_ATTR(_name, _mode, _show, _store) \ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) +#define DEVICE_ATTR_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ struct dev_ext_attribute dev_attr_##_name = \ { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) } @@ -924,6 +940,11 @@ extern __printf(5, 6) struct device *device_create(struct class *cls, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); +extern __printf(6, 7) +struct device *device_create_with_groups(struct class *cls, + struct device *parent, dev_t devt, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); extern void device_destroy(struct class *cls, dev_t devt); /* diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 1b4d4ee..de7d74a 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h @@ -177,7 +177,11 @@ enum drbd_ret_code { ERR_NEED_APV_100 = 163, ERR_NEED_ALLOW_TWO_PRI = 164, ERR_MD_UNCLEAN = 165, - + ERR_MD_LAYOUT_CONNECTED = 166, + ERR_MD_LAYOUT_TOO_BIG = 167, + ERR_MD_LAYOUT_TOO_SMALL = 168, + ERR_MD_LAYOUT_NO_FIT = 169, + ERR_IMPLICIT_SHRINK = 170, /* insert new ones above this line */ AFTER_LAST_ERR_CODE }; diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h index d0d8fac..e8c4457 100644 --- a/include/linux/drbd_genl.h +++ b/include/linux/drbd_genl.h @@ -181,6 +181,8 @@ GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms, __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size) __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force) __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync) + __u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF) + __u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF) ) GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info, diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 1fedf2b..17e50bb 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h @@ -215,4 +215,13 @@ #define DRBD_ALWAYS_ASBP_DEF 0 #define DRBD_USE_RLE_DEF 1 +#define DRBD_AL_STRIPES_MIN 1 
+#define DRBD_AL_STRIPES_MAX 1024 +#define DRBD_AL_STRIPES_DEF 1 +#define DRBD_AL_STRIPES_SCALE '1' + +#define DRBD_AL_STRIPE_SIZE_MIN 4 +#define DRBD_AL_STRIPE_SIZE_MAX 16777216 +#define DRBD_AL_STRIPE_SIZE_DEF 32 +#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */ #endif diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 07261d5..1f79b20 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h @@ -51,6 +51,5 @@ dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base, void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs); void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs); cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs); -void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs); #endif /* __DW_APB_TIMER_H__ */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 0b76327..5c6d7fb 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -622,7 +622,7 @@ struct edac_raw_error_desc { */ struct mem_ctl_info { struct device dev; - struct bus_type bus; + struct bus_type *bus; struct list_head link; /* for global list of mem_ctl_info structs */ @@ -742,4 +742,9 @@ struct mem_ctl_info { #endif }; +/* + * Maximum number of memory controllers in the coherent fabric. + */ +#define EDAC_MAX_MCS 16 + #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 21ae6b3..5f8f176 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -594,8 +594,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); extern int __init efi_uart_console_only (void); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern unsigned long efi_get_time(void); -extern int efi_set_rtc_mmss(unsigned long nowtime); +extern void efi_get_time(struct timespec *now); +extern int efi_set_rtc_mmss(const struct timespec *now); extern void efi_reserve_boot_services(void); extern struct efi_memory_map memmap; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index acd0312..306dd8c 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -7,6 +7,7 @@ #ifdef CONFIG_BLOCK struct io_cq; +struct elevator_type; typedef int (elevator_merge_fn) (struct request_queue *, struct request **, struct bio *); @@ -35,7 +36,8 @@ typedef void (elevator_put_req_fn) (struct request *); typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); -typedef int (elevator_init_fn) (struct request_queue *); +typedef int (elevator_init_fn) (struct request_queue *, + struct elevator_type *e); typedef void (elevator_exit_fn) (struct elevator_queue *); struct elevator_ops @@ -155,6 +157,8 @@ extern int elevator_init(struct request_queue *, char *); extern void elevator_exit(struct elevator_queue *); extern int elevator_change(struct request_queue *, const char *); extern bool elv_rq_merge_ok(struct request *, struct bio *); +extern struct elevator_queue *elevator_alloc(struct request_queue *, + struct elevator_type *); /* * Helper functions. 
diff --git a/include/linux/fb.h b/include/linux/fb.h index d49c60f..ffac70a 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -624,7 +624,7 @@ extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u3 extern void fb_set_suspend(struct fb_info *info, int state); extern int fb_get_color_depth(struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix); -extern int fb_get_options(char *name, char **option); +extern int fb_get_options(const char *name, char **option); extern int fb_new_modelist(struct fb_info *info); extern struct fb_info *registered_fb[FB_MAX]; diff --git a/include/linux/filter.h b/include/linux/filter.h index f65f5a6..a6ac848 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -59,10 +59,10 @@ extern void bpf_jit_free(struct sk_filter *fp); static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { - pr_err("flen=%u proglen=%u pass=%u image=%p\n", + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", flen, proglen, pass, image); if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 191501a..5d7782e 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -251,8 +251,10 @@ struct ieee1394_device_id; struct fw_driver { struct device_driver driver; + int (*probe)(struct fw_unit *unit, const struct ieee1394_device_id *id); /* Called when the parent device sits through a bus reset. */ void (*update)(struct fw_unit *unit); + void (*remove)(struct fw_unit *unit); const struct ieee1394_device_id *id_table; }; @@ -434,6 +436,7 @@ struct fw_iso_context { int type; int channel; int speed; + bool drop_overflow_headers; size_t header_size; union { fw_iso_callback_t sc; diff --git a/include/linux/fs.h b/include/linux/fs.h index 99be011..9818747 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -10,6 +10,7 @@ #include <linux/stat.h> #include <linux/cache.h> #include <linux/list.h> +#include <linux/llist.h> #include <linux/radix-tree.h> #include <linux/rbtree.h> #include <linux/init.h> @@ -372,8 +373,8 @@ struct address_space_operations { int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **, unsigned long *); /* - * migrate the contents of a page to the specified target. If sync - * is false, it must not block. + * migrate the contents of a page to the specified target. If + * migrate_mode is MIGRATE_ASYNC, it must not block. */ int (*migratepage) (struct address_space *, struct page *, struct page *, enum migrate_mode); @@ -768,6 +769,7 @@ struct file { */ union { struct list_head fu_list; + struct llist_node fu_llist; struct rcu_head fu_rcuhead; } f_u; struct path f_path; @@ -954,6 +956,7 @@ struct file_lock { unsigned int fl_flags; unsigned char fl_type; unsigned int fl_pid; + int fl_link_cpu; /* what cpu's list is this on? 
*/ struct pid *fl_nspid; wait_queue_head_t fl_wait; struct file *fl_file; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 99d0fbc..9f15c00 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -566,10 +566,6 @@ static inline ssize_t ftrace_filter_write(struct file *file, const char __user * size_t cnt, loff_t *ppos) { return -ENODEV; } static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { return -ENODEV; } -static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence) -{ - return -ENODEV; -} static inline int ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } #endif /* CONFIG_DYNAMIC_FTRACE */ @@ -828,10 +824,15 @@ enum ftrace_dump_mode; extern enum ftrace_dump_mode ftrace_dump_on_oops; +extern void disable_trace_on_warning(void); +extern int __disable_trace_on_warning; + #ifdef CONFIG_PREEMPT #define INIT_TRACE_RECURSION .trace_recursion = 0, #endif +#else /* CONFIG_TRACING */ +static inline void disable_trace_on_warning(void) { } #endif /* CONFIG_TRACING */ #ifndef INIT_TRACE_RECURSION diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4372658..120d57a 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -78,6 +78,11 @@ struct trace_iterator { /* trace_seq for __print_flags() and __print_symbolic() etc. */ struct trace_seq tmp_seq; + cpumask_var_t started; + + /* it's true when current open file is snapshot */ + bool snapshot; + /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; @@ -90,10 +95,7 @@ struct trace_iterator { loff_t pos; long idx; - cpumask_var_t started; - - /* it's true when current open file is snapshot */ - bool snapshot; + /* All new field here will be zeroed out in pipe_read */ }; enum trace_iter_flags { @@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, const char *name, int offset, int size, int is_signed, int filter_type); extern int trace_add_event_call(struct ftrace_event_call *call); -extern void trace_remove_event_call(struct ftrace_event_call *call); +extern int trace_remove_event_call(struct ftrace_event_call *call); #define is_signed_type(type) (((type)(-1)) < (type)1) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0f615eb..9b4dd49 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -209,7 +209,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) * 0x9 => DMA or NORMAL (MOVABLE+DMA) * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) * 0xb => BAD (MOVABLE+HIGHMEM+DMA) - * 0xc => DMA32 (MOVABLE+HIGHMEM+DMA32) + * 0xc => DMA32 (MOVABLE+DMA32) * 0xd => BAD (MOVABLE+DMA32+DMA) * 0xe => BAD (MOVABLE+DMA32+HIGHMEM) * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 488debb..81cbbdb 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -658,7 +658,6 @@ struct twl4030_power_data { bool use_poweroff; /* Board is wired for TWL poweroff */ }; -extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); extern int twl4030_remove_script(u8 flags); extern void twl4030_power_off(void); @@ -726,7 +725,7 @@ struct twl4030_platform_data { struct regulator_init_data *clk32kg; struct regulator_init_data *v1v8; struct regulator_init_data *v2v1; - /* TWL6025 LDO regulators */ + /* TWL6032 LDO regulators */ struct regulator_init_data *ldo1; struct 
regulator_init_data *ldo2; struct regulator_init_data *ldo3; @@ -736,7 +735,7 @@ struct twl4030_platform_data { struct regulator_init_data *ldo7; struct regulator_init_data *ldoln; struct regulator_init_data *ldousb; - /* TWL6025 DCDC regulators */ + /* TWL6032 DCDC regulators */ struct regulator_init_data *smps3; struct regulator_init_data *smps4; struct regulator_init_data *vio6025; @@ -753,7 +752,7 @@ struct twl_regulator_driver_data { #define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ #define TWL5031 BIT(2) /* twl5031 has different registers */ #define TWL6030_CLASS BIT(3) /* TWL6030 class */ -#define TWL6025_SUBCLASS BIT(4) /* TWL6025 has changed registers */ +#define TWL6032_SUBCLASS BIT(4) /* TWL6032 has changed registers */ #define TWL4030_ALLOW_UNSUPPORTED BIT(5) /* Some voltages are possible * but not officially supported. * This flag is necessary to @@ -840,20 +839,20 @@ static inline int twl4030charger_usb_en(int enable) { return 0; } #define TWL6030_REG_CLK32KG 48 /* LDOs on 6025 have different names */ -#define TWL6025_REG_LDO2 49 -#define TWL6025_REG_LDO4 50 -#define TWL6025_REG_LDO3 51 -#define TWL6025_REG_LDO5 52 -#define TWL6025_REG_LDO1 53 -#define TWL6025_REG_LDO7 54 -#define TWL6025_REG_LDO6 55 -#define TWL6025_REG_LDOLN 56 -#define TWL6025_REG_LDOUSB 57 +#define TWL6032_REG_LDO2 49 +#define TWL6032_REG_LDO4 50 +#define TWL6032_REG_LDO3 51 +#define TWL6032_REG_LDO5 52 +#define TWL6032_REG_LDO1 53 +#define TWL6032_REG_LDO7 54 +#define TWL6032_REG_LDO6 55 +#define TWL6032_REG_LDOLN 56 +#define TWL6032_REG_LDOUSB 57 /* 6025 DCDC supplies */ -#define TWL6025_REG_SMPS3 58 -#define TWL6025_REG_SMPS4 59 -#define TWL6025_REG_VIO 60 +#define TWL6032_REG_SMPS3 58 +#define TWL6032_REG_SMPS4 59 +#define TWL6032_REG_VIO 60 #endif /* End of __TWL4030_H */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 06b0ed0..b0dc87a 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -146,6 +146,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 #define IEEE80211_MAX_TIM_LEN 251 +#define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2. @@ -1829,6 +1830,15 @@ enum ieee80211_key_len { WLAN_KEY_LEN_AES_CMAC = 16, }; +#define IEEE80211_WEP_IV_LEN 4 +#define IEEE80211_WEP_ICV_LEN 4 +#define IEEE80211_CCMP_HDR_LEN 8 +#define IEEE80211_CCMP_MIC_LEN 8 +#define IEEE80211_CCMP_PN_LEN 6 +#define IEEE80211_TKIP_IV_LEN 8 +#define IEEE80211_TKIP_ICV_LEN 4 +#define IEEE80211_CMAC_PN_LEN 6 + /* Public action codes */ enum ieee80211_pub_actioncode { WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4, diff --git a/include/linux/if_link.h b/include/linux/if_link.h index c3f817c..a86784d 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -12,5 +12,6 @@ struct ifla_vf_info { __u32 qos; __u32 tx_rate; __u32 spoofchk; + __u32 linkstate; }; #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 84dde1d..ddd33fd 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -8,7 +8,7 @@ #include <net/netlink.h> #include <linux/u64_stats_sync.h> -#if defined(CONFIG_MACVTAP) || defined(CONFIG_MACVTAP_MODULE) +#if IS_ENABLED(CONFIG_MACVTAP) struct socket *macvtap_get_socket(struct file *); #else #include <linux/err.h> @@ -50,7 +50,7 @@ struct macvlan_pcpu_stats { * Maximum times a macvtap device can be opened. 
This can be used to * configure the number of receive queue, e.g. for multiqueue virtio. */ -#define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? NR_CPUS : 16) +#define MAX_MACVTAP_QUEUES 16 #define MACVLAN_MC_FILTER_BITS 8 #define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS) @@ -65,12 +65,18 @@ struct macvlan_dev { DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); + netdev_features_t set_features; enum macvlan_mode mode; u16 flags; int (*receive)(struct sk_buff *skb); int (*forward)(struct net_device *dev, struct sk_buff *skb); - struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; + /* This array tracks active taps. */ + struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES]; + /* This list tracks all taps (both enabled and disabled) */ + struct list_head queue_list; int numvtaps; + int numqueues; + netdev_features_t tap_features; int minor; }; diff --git a/include/linux/if_team.h b/include/linux/if_team.h index 16fae64..f6156f9 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -69,6 +69,7 @@ struct team_port { s32 priority; /* lower number ~ higher priority */ u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ + struct rcu_head rcu; long mode_priv[0]; }; @@ -228,6 +229,16 @@ static inline struct team_port *team_get_port_by_index(struct team *team, return port; return NULL; } + +static inline int team_num_to_port_index(struct team *team, int num) +{ + int en_port_count = ACCESS_ONCE(team->en_port_count); + + if (unlikely(!en_port_count)) + return 0; + return num % en_port_count; +} + static inline struct team_port *team_get_port_by_index_rcu(struct team *team, int port_index) { diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 637fa71d..715c343 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -79,9 +79,8 @@ static inline int is_vlan_dev(struct net_device *dev) } #define vlan_tx_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) -#define vlan_tx_nonzero_tag_present(__skb) \ - (vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK)) #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define vlan_tx_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) @@ -243,8 +242,6 @@ static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, return skb; } -#define HAVE_VLAN_PUT_TAG - /** * vlan_put_tag - inserts VLAN tag according to device features * @skb: skbuff to tag diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 7f2bf15..e3362b5 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -84,6 +84,7 @@ struct ip_mc_list { struct ip_mc_list *next; struct ip_mc_list __rcu *next_rcu; }; + struct ip_mc_list __rcu *next_hash; struct timer_list timer; int users; atomic_t refcnt; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 8d171f4..3d35b70 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -211,8 +211,8 @@ struct iio_chan_spec { static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, enum iio_chan_info_enum type) { - return (chan->info_mask_separate & type) | - (chan->info_mask_shared_by_type & type); + return (chan->info_mask_separate & BIT(type)) | + (chan->info_mask_shared_by_type & BIT(type)); } #define IIO_ST(si, rb, sb, sh) \ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ea1e3b8..b99cd23 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -50,12 +50,17 @@ struct 
ipv4_devconf { DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); }; +#define MC_HASH_SZ_LOG 9 + struct in_device { struct net_device *dev; atomic_t refcnt; int dead; struct in_ifaddr *ifa_list; /* IP ifaddr chain */ + struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */ + struct ip_mc_list __rcu * __rcu *mc_hash; + int mc_count; /* Number of installed mcasts */ spinlock_t mc_tomb_lock; struct ip_mc_list *mc_tomb; diff --git a/include/linux/init.h b/include/linux/init.h index 8618147..e73f2b7 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -93,13 +93,13 @@ #define __exit __section(.exit.text) __exitused __cold notrace -/* Used for HOTPLUG_CPU */ -#define __cpuinit __section(.cpuinit.text) __cold notrace -#define __cpuinitdata __section(.cpuinit.data) -#define __cpuinitconst __constsection(.cpuinit.rodata) -#define __cpuexit __section(.cpuexit.text) __exitused __cold notrace -#define __cpuexitdata __section(.cpuexit.data) -#define __cpuexitconst __constsection(.cpuexit.rodata) +/* temporary, until all users are removed */ +#define __cpuinit +#define __cpuinitdata +#define __cpuinitconst +#define __cpuexit +#define __cpuexitdata +#define __cpuexitconst /* Used for MEMORY_HOTPLUG */ #define __meminit __section(.meminit.text) __cold notrace @@ -118,9 +118,8 @@ #define __INITRODATA .section ".init.rodata","a",%progbits #define __FINITDATA .previous -#define __CPUINIT .section ".cpuinit.text", "ax" -#define __CPUINITDATA .section ".cpuinit.data", "aw" -#define __CPUINITRODATA .section ".cpuinit.rodata", "a" +/* temporary, until all users are removed */ +#define __CPUINIT #define __MEMINIT .section ".meminit.text", "ax" #define __MEMINITDATA .section ".meminit.data", "aw" diff --git a/include/linux/input/ti_am335x_tsc.h b/include/linux/input/ti_am335x_tsc.h deleted file mode 100644 index 49269a2..0000000 --- a/include/linux/input/ti_am335x_tsc.h +++ /dev/null @@ -1,23 +0,0 @@ -#ifndef __LINUX_TI_AM335X_TSC_H -#define __LINUX_TI_AM335X_TSC_H - -/** - * struct tsc_data Touchscreen wire configuration - * @wires: Wires refer to application modes - * i.e. 4/5/8 wire touchscreen support - * on the platform. - * @x_plate_resistance: X plate resistance. - * @steps_to_configure: The sequencer supports a total of - * 16 programmable steps. - * A step configured to read a single - * co-ordinate value, can be applied - * more number of times for better results. - */ - -struct tsc_data { - int wires; - int x_plate_resistance; - int steps_to_configure; -}; - -#endif diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h index ab14403..b433df8 100644 --- a/include/linux/input/tps6507x-ts.h +++ b/include/linux/input/tps6507x-ts.h @@ -14,7 +14,6 @@ /* Board specific touch screen initial values */ struct touchscreen_init_data { int poll_period; /* ms */ - int vref; /* non-zero to leave vref on */ __u16 min_pressure; /* min reading to be treated as a touch */ __u16 vendor; __u16 product; diff --git a/include/linux/io.h b/include/linux/io.h index 069e407..f4f42fa 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -76,4 +76,29 @@ void devm_ioremap_release(struct device *dev, void *res); #define arch_has_dev_port() (1) #endif +/* + * Some systems (x86 without PAT) have a somewhat reliable way to mark a + * physical address range such that uncached mappings will actually + * end up write-combining. 
This facility should be used in conjunction + * with pgprot_writecombine, ioremap-wc, or set_memory_wc, since it has + * no effect if the per-page mechanisms are functional. + * (On x86 without PAT, these functions manipulate MTRRs.) + * + * arch_phys_del_wc(0) or arch_phys_del_wc(any error code) is guaranteed + * to have no effect. + */ +#ifndef arch_phys_wc_add +static inline int __must_check arch_phys_wc_add(unsigned long base, + unsigned long size) +{ + return 0; /* It worked (i.e. did nothing). */ +} + +static inline void arch_phys_wc_del(int handle) +{ +} + +#define arch_phys_wc_add arch_phys_wc_add +#endif + #endif /* _LINUX_IO_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index ba2c708..c983ed1 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -73,57 +73,48 @@ struct irq_domain_chip_generic; /** * struct irq_domain - Hardware interrupt number translation object * @link: Element in global irq_domain list. - * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This - * will be one of the IRQ_DOMAIN_MAP_* values. - * @revmap_data: Revmap method specific data. + * @name: Name of interrupt domain * @ops: pointer to irq_domain methods * @host_data: private data pointer for use by owner. Not touched by irq_domain * core code. - * @irq_base: Start of irq_desc range assigned to the irq_domain. The creator - * of the irq_domain is responsible for allocating the array of - * irq_desc structures. - * @nr_irq: Number of irqs managed by the irq domain - * @hwirq_base: Starting number for hwirqs managed by the irq domain - * @of_node: (optional) Pointer to device tree nodes associated with the - * irq_domain. Used when decoding device tree interrupt specifiers. + * + * Optional elements + * @of_node: Pointer to device tree nodes associated with the irq_domain. Used + * when decoding device tree interrupt specifiers. + * @gc: Pointer to a list of generic chips. There is a helper function for + * setting up one or more generic chips for interrupt controllers + * drivers using the generic chip library which uses this pointer. + * + * Revmap data, used internally by irq_domain + * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that + * support direct mapping + * @revmap_size: Size of the linear map table @linear_revmap[] + * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map + * @linear_revmap: Linear table of hwirq->virq reverse mappings */ struct irq_domain { struct list_head link; - - /* type of reverse mapping_technique */ - unsigned int revmap_type; - union { - struct { - unsigned int size; - unsigned int first_irq; - irq_hw_number_t first_hwirq; - } legacy; - struct { - unsigned int size; - unsigned int *revmap; - } linear; - struct { - unsigned int max_irq; - } nomap; - struct radix_tree_root tree; - } revmap_data; + const char *name; const struct irq_domain_ops *ops; void *host_data; - irq_hw_number_t inval_irq; - /* Optional device node pointer */ + /* Optional data */ struct device_node *of_node; - /* Optional pointer to generic interrupt chips */ struct irq_domain_chip_generic *gc; -}; -#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs. - * ie. legacy 8259, gets irqs 1..15 */ -#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */ -#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */ -#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */ + /* reverse map data. 
The linear map gets appended to the irq_domain */ + irq_hw_number_t hwirq_max; + unsigned int revmap_direct_max_irq; + unsigned int revmap_size; + struct radix_tree_root revmap_tree; + unsigned int linear_revmap[]; +}; #ifdef CONFIG_IRQ_DOMAIN +struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, + irq_hw_number_t hwirq_max, int direct_max, + const struct irq_domain_ops *ops, + void *host_data); struct irq_domain *irq_domain_add_simple(struct device_node *of_node, unsigned int size, unsigned int first_irq, @@ -135,21 +126,30 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, irq_hw_number_t first_hwirq, const struct irq_domain_ops *ops, void *host_data); -struct irq_domain *irq_domain_add_linear(struct device_node *of_node, +extern struct irq_domain *irq_find_host(struct device_node *node); +extern void irq_set_default_host(struct irq_domain *host); + +/** + * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. + * @of_node: pointer to interrupt controller's device tree node. + * @size: Number of interrupts in the domain. + * @ops: map/unmap domain callbacks + * @host_data: Controller private data pointer + */ +static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node, unsigned int size, const struct irq_domain_ops *ops, - void *host_data); -struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, + void *host_data) +{ + return __irq_domain_add(of_node, size, size, 0, ops, host_data); +} +static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, unsigned int max_irq, const struct irq_domain_ops *ops, - void *host_data); -struct irq_domain *irq_domain_add_tree(struct device_node *of_node, - const struct irq_domain_ops *ops, - void *host_data); - -extern struct irq_domain *irq_find_host(struct device_node *node); -extern void irq_set_default_host(struct irq_domain *host); - + void *host_data) +{ + return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data); +} static inline struct irq_domain *irq_domain_add_legacy_isa( struct device_node *of_node, const struct irq_domain_ops *ops, @@ -158,21 +158,40 @@ static inline struct irq_domain *irq_domain_add_legacy_isa( return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, host_data); } +static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node, + const struct irq_domain_ops *ops, + void *host_data) +{ + return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data); +} extern void irq_domain_remove(struct irq_domain *host); -extern int irq_domain_associate_many(struct irq_domain *domain, - unsigned int irq_base, - irq_hw_number_t hwirq_base, int count); -static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - return irq_domain_associate_many(domain, irq, hwirq, 1); -} +extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq); +extern void irq_domain_associate_many(struct irq_domain *domain, + unsigned int irq_base, + irq_hw_number_t hwirq_base, int count); extern unsigned int irq_create_mapping(struct irq_domain *host, irq_hw_number_t hwirq); extern void irq_dispose_mapping(unsigned int virq); + +/** + * irq_linear_revmap() - Find a linux irq from a hw irq number. 
+ * @domain: domain owning this hardware interrupt + * @hwirq: hardware irq number in that domain space + * + * This is a fast path alternative to irq_find_mapping() that can be + * called directly by irq controller code to save a handful of + * instructions. It is always safe to call, but won't find irqs mapped + * using the radix tree. + */ +static inline unsigned int irq_linear_revmap(struct irq_domain *domain, + irq_hw_number_t hwirq) +{ + return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; +} extern unsigned int irq_find_mapping(struct irq_domain *host, irq_hw_number_t hwirq); extern unsigned int irq_create_direct_mapping(struct irq_domain *host); @@ -186,9 +205,6 @@ static inline int irq_create_identity_mapping(struct irq_domain *host, return irq_create_strict_mappings(host, hwirq, hwirq, 1); } -extern unsigned int irq_linear_revmap(struct irq_domain *host, - irq_hw_number_t hwirq); - extern const struct irq_domain_ops irq_domain_simple_ops; /* stock xlate functions */ @@ -202,14 +218,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_type); -#if defined(CONFIG_OF_IRQ) -extern void irq_domain_generate_simple(const struct of_device_id *match, - u64 phys_base, unsigned int irq_start); -#else /* CONFIG_OF_IRQ */ -static inline void irq_domain_generate_simple(const struct of_device_id *match, - u64 phys_base, unsigned int irq_start) { } -#endif /* !CONFIG_OF_IRQ */ - #else /* CONFIG_IRQ_DOMAIN */ static inline void irq_dispose_mapping(unsigned int virq) { } #endif /* !CONFIG_IRQ_DOMAIN */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 8fb8edf..97ba4e7 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -139,6 +139,10 @@ static inline u64 get_jiffies_64(void) ((__s64)(a) - (__s64)(b) >= 0)) #define time_before_eq64(a,b) time_after_eq64(b,a) +#define time_in_range64(a, b, c) \ + (time_after_eq64(a, b) && \ + time_before_eq64(a, c)) + /* * These four macros compare jiffies and 'a' for convenience. 
*/ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7e6b97e..482ad2d 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -561,9 +561,6 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...); extern __printf(2, 3) int __trace_printk(unsigned long ip, const char *fmt, ...); -extern int __trace_bputs(unsigned long ip, const char *str); -extern int __trace_puts(unsigned long ip, const char *str, int size); - /** * trace_puts - write a string into the ftrace buffer * @str: the string to record @@ -599,6 +596,8 @@ extern int __trace_puts(unsigned long ip, const char *str, int size); else \ __trace_puts(_THIS_IP_, str, strlen(str)); \ }) +extern int __trace_bputs(unsigned long ip, const char *str); +extern int __trace_puts(unsigned long ip, const char *str, int size); extern void trace_dump_stack(int skip); @@ -630,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); static inline void tracing_start(void) { } static inline void tracing_stop(void) { } static inline void ftrace_off_permanent(void) { } -static inline void trace_dump_stack(void) { } +static inline void trace_dump_stack(int skip) { } static inline void tracing_on(void) { } static inline void tracing_off(void) { } diff --git a/include/linux/ktime.h b/include/linux/ktime.h index bbca128..debf208 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -229,7 +229,8 @@ static inline ktime_t timespec_to_ktime(const struct timespec ts) static inline ktime_t timeval_to_ktime(const struct timeval tv) { return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, - .nsec = (s32)tv.tv_usec * 1000 } }; + .nsec = (s32)(tv.tv_usec * + NSEC_PER_USEC) } }; } /** @@ -320,12 +321,17 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec) { - return ktime_add_ns(kt, usec * 1000); + return ktime_add_ns(kt, usec * NSEC_PER_USEC); +} + +static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec) +{ + return ktime_add_ns(kt, msec * NSEC_PER_MSEC); } static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) { - return ktime_sub_ns(kt, usec * 1000); + return ktime_sub_ns(kt, usec * NSEC_PER_USEC); } extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); @@ -338,7 +344,8 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); * * Returns true if there was a successful conversion, false if kt was 0. */ -static inline bool ktime_to_timespec_cond(const ktime_t kt, struct timespec *ts) +static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, + struct timespec *ts) { if (kt.tv64) { *ts = ktime_to_timespec(kt); @@ -366,7 +373,15 @@ extern void ktime_get_ts(struct timespec *ts); static inline ktime_t ns_to_ktime(u64 ns) { static const ktime_t ktime_zero = { .tv64 = 0 }; + return ktime_add_ns(ktime_zero, ns); } +static inline ktime_t ms_to_ktime(u64 ms) +{ + static const ktime_t ktime_zero = { .tv64 = 0 }; + + return ktime_add_ms(ktime_zero, ms); +} + #endif diff --git a/include/linux/list.h b/include/linux/list.h index b83e565..f4d8a2f 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -381,17 +381,6 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; pos != (head); pos = pos->next) /** - * __list_for_each - iterate over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - * - * This variant doesn't differ from list_for_each() any more. 
- * We don't do prefetching in either case. - */ -#define __list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** * list_for_each_prev - iterate over a list backwards * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/llist.h b/include/linux/llist.h index a5199f6..cdaa7f0 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -142,6 +142,9 @@ static inline struct llist_node *llist_next(struct llist_node *node) return node->next; } +extern bool llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head); /** * llist_add - add a new entry * @new: new entry to be added @@ -151,18 +154,7 @@ static inline struct llist_node *llist_next(struct llist_node *node) */ static inline bool llist_add(struct llist_node *new, struct llist_head *head) { - struct llist_node *entry, *old_entry; - - entry = head->first; - for (;;) { - old_entry = entry; - new->next = entry; - entry = cmpxchg(&head->first, old_entry, new); - if (entry == old_entry) - break; - } - - return old_entry == NULL; + return llist_add_batch(new, new, head); } /** @@ -178,9 +170,6 @@ static inline struct llist_node *llist_del_all(struct llist_head *head) return xchg(&head->first, NULL); } -extern bool llist_add_batch(struct llist_node *new_first, - struct llist_node *new_last, - struct llist_head *head); extern struct llist_node *llist_del_first(struct llist_head *head); #endif /* LLIST_H */ diff --git a/include/linux/lz4.h b/include/linux/lz4.h new file mode 100644 index 0000000..d21c13f --- /dev/null +++ b/include/linux/lz4.h @@ -0,0 +1,87 @@ +#ifndef __LZ4_H__ +#define __LZ4_H__ +/* + * LZ4 Kernel Interface + * + * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *)) +#define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *)) + +/* + * lz4_compressbound() + * Provides the maximum size that LZ4 may output in a "worst case" scenario + * (input data not compressible) + */ +static inline size_t lz4_compressbound(size_t isize) +{ + return isize + (isize / 255) + 16; +} + +/* + * lz4_compress() + * src : source address of the original data + * src_len : size of the original data + * dst : output buffer address of the compressed data + * This requires 'dst' of size LZ4_COMPRESSBOUND. + * dst_len : is the output size, which is returned after compress done + * workmem : address of the working memory. + * This requires 'workmem' of size LZ4_MEM_COMPRESS. + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer and workmem must be already allocated with + * the defined size. + */ +int lz4_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + + /* + * lz4hc_compress() + * src : source address of the original data + * src_len : size of the original data + * dst : output buffer address of the compressed data + * This requires 'dst' of size LZ4_COMPRESSBOUND. + * dst_len : is the output size, which is returned after compress done + * workmem : address of the working memory. + * This requires 'workmem' of size LZ4HC_MEM_COMPRESS. 
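With llist_add() now a one-element wrapper around llist_add_batch(), its return value still reports the empty-to-non-empty transition. A hedged sketch of the usual producer/consumer pattern (all names below are invented):

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/workqueue.h>

struct example_item {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(example_list);

/* Consumer: detach everything queued so far in one atomic xchg. */
static void example_worker(struct work_struct *work)
{
	struct llist_node *entries = llist_del_all(&example_list);
	struct example_item *item;

	llist_for_each_entry(item, entries, node)
		pr_info("payload %d\n", item->payload);
}

static DECLARE_WORK(example_work, example_worker);

/* Producer: llist_add() returns true only when the list was empty,
 * so the worker is scheduled once per burst of additions.
 */
static void example_push(struct example_item *item)
{
	if (llist_add(&item->node, &example_list))
		schedule_work(&example_work);
}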
+ * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer and workmem must be already allocated with + * the defined size. + */ +int lz4hc_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + +/* + * lz4_decompress() + * src : source address of the compressed data + * src_len : is the input size, whcih is returned after decompress done + * dest : output buffer address of the decompressed data + * actual_dest_len: is the size of uncompressed data, supposing it's known + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer must be already allocated. + * slightly faster than lz4_decompress_unknownoutputsize() + */ +int lz4_decompress(const char *src, size_t *src_len, char *dest, + size_t actual_dest_len); + +/* + * lz4_decompress_unknownoutputsize() + * src : source address of the compressed data + * src_len : is the input size, therefore the compressed size + * dest : output buffer address of the decompressed data + * dest_len: is the max size of the destination buffer, which is + * returned with actual size of decompressed data after + * decompress done + * return : Success if return 0 + * Error if return (< 0) + * note : Destination buffer must be already allocated. + */ +int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, + char *dest, size_t *dest_len); +#endif diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index dd3c34e..8e9a029 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -14,6 +14,8 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1116R 0x01410e40 +#define MARVELL_PHY_ID_88E1510 0x01410dd0 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index e94537b..97cb283 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -17,7 +17,6 @@ #include <linux/regmap.h> #include <linux/atomic.h> -#define PM80X_VERSION_MASK (0xFF) /* 80X chip ID mask */ enum { CHIP_INVALID = 0, CHIP_PM800, @@ -299,8 +298,7 @@ struct pm80x_chip { struct regmap *regmap; struct regmap_irq_chip *regmap_irq_chip; struct regmap_irq_chip_data *irq_data; - unsigned char version; - int id; + int type; int irq; int irq_mode; unsigned long wu_flag; @@ -309,8 +307,14 @@ struct pm80x_chip { struct pm80x_platform_data { struct pm80x_rtc_pdata *rtc; - unsigned short power_page_addr; /* power page I2C address */ - unsigned short gpadc_page_addr; /* gpadc page I2C address */ + /* + * For the regulator not defined, set regulators[not_defined] to be + * NULL. num_regulators are the number of regulators supposed to be + * initialized. If all regulators are not defined, set num_regulators + * to be 0. 
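The lz4.h interface above is declarations only; as an illustrative sketch (buffer names and the helper are invented, error handling kept minimal), a compress/decompress round trip would look roughly like this:

#include <linux/lz4.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Illustrative only: compress 'src' and decompress it back again. */
static int example_lz4_roundtrip(const unsigned char *src, size_t src_len)
{
	size_t comp_len = lz4_compressbound(src_len);
	size_t out_len = src_len;	/* capacity of the decompress buffer */
	unsigned char *comp = vmalloc(comp_len);
	char *out = vmalloc(src_len);
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	int ret = -ENOMEM;

	if (!comp || !out || !wrkmem)
		goto out_free;

	ret = lz4_compress(src, src_len, comp, &comp_len, wrkmem);
	if (!ret)
		ret = lz4_decompress_unknownoutputsize((char *)comp, comp_len,
						       out, &out_len);
out_free:
	vfree(wrkmem);
	vfree(out);
	vfree(comp);
	return ret;
}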
+ */ + struct regulator_init_data *regulators[PM800_ID_RG_MAX]; + unsigned int num_regulators; int irq_mode; /* Clear interrupt by read/write(0/1) */ int batt_det; /* enable/disable */ int (*plat_config)(struct pm80x_chip *chip, @@ -363,7 +367,6 @@ static inline int pm80x_dev_resume(struct device *dev) } #endif -extern int pm80x_init(struct i2c_client *client, - const struct i2c_device_id *id); +extern int pm80x_init(struct i2c_client *client); extern int pm80x_deinit(void); #endif /* __LINUX_MFD_88PM80X_H */ diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h index 0390d59..f4acd89 100644 --- a/include/linux/mfd/abx500/ab8500.h +++ b/include/linux/mfd/abx500/ab8500.h @@ -291,6 +291,8 @@ enum ab8500_version { #define AB8540_INT_FSYNC2R 213 #define AB8540_INT_BITCLK2F 214 #define AB8540_INT_BITCLK2R 215 +/* ab8540_irq_regoffset[27] -> IT[Source|Latch|Mask]33 */ +#define AB8540_INT_RTC_1S 216 /* * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index f797bb9..5cf8b91 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -23,6 +23,7 @@ enum arizona_type { WM5102 = 1, WM5110 = 2, + WM8997 = 3, }; #define ARIZONA_IRQ_GP1 0 @@ -121,5 +122,6 @@ int arizona_set_irq_wake(struct arizona *arizona, int irq, int on); int wm5102_patch(struct arizona *arizona); int wm5110_patch(struct arizona *arizona); +int wm8997_patch(struct arizona *arizona); #endif diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index 7dd6524..13a1ee9 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -28,6 +28,8 @@ #include <linux/mfd/core.h> #include <linux/platform_data/edma.h> +#include <mach/hardware.h> + /* * Register values. */ @@ -111,8 +113,6 @@ struct davinci_vc { /* Memory resources */ void __iomem *base; - resource_size_t pbase; - size_t base_size; /* MFD cells */ struct mfd_cell cells[DAVINCI_VC_CELLS]; diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h new file mode 100644 index 0000000..b911ef3 --- /dev/null +++ b/include/linux/mfd/kempld.h @@ -0,0 +1,125 @@ +/* + * Kontron PLD driver definitions + * + * Copyright (c) 2010-2012 Kontron Europe GmbH + * Author: Michael Brunner <michael.brunner@kontron.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License 2 as published + * by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_MFD_KEMPLD_H_ +#define _LINUX_MFD_KEMPLD_H_ + +/* kempld register definitions */ +#define KEMPLD_IOINDEX 0xa80 +#define KEMPLD_IODATA 0xa81 +#define KEMPLD_MUTEX_KEY 0x80 +#define KEMPLD_VERSION 0x00 +#define KEMPLD_VERSION_LSB 0x00 +#define KEMPLD_VERSION_MSB 0x01 +#define KEMPLD_VERSION_GET_MINOR(x) (x & 0x1f) +#define KEMPLD_VERSION_GET_MAJOR(x) ((x >> 5) & 0x1f) +#define KEMPLD_VERSION_GET_NUMBER(x) ((x >> 10) & 0xf) +#define KEMPLD_VERSION_GET_TYPE(x) ((x >> 14) & 0x3) +#define KEMPLD_BUILDNR 0x02 +#define KEMPLD_BUILDNR_LSB 0x02 +#define KEMPLD_BUILDNR_MSB 0x03 +#define KEMPLD_FEATURE 0x04 +#define KEMPLD_FEATURE_LSB 0x04 +#define KEMPLD_FEATURE_MSB 0x05 +#define KEMPLD_FEATURE_BIT_I2C (1 << 0) +#define KEMPLD_FEATURE_BIT_WATCHDOG (1 << 1) +#define KEMPLD_FEATURE_BIT_GPIO (1 << 2) +#define KEMPLD_FEATURE_MASK_UART (7 << 3) +#define KEMPLD_FEATURE_BIT_NMI (1 << 8) +#define KEMPLD_FEATURE_BIT_SMI (1 << 9) +#define KEMPLD_FEATURE_BIT_SCI (1 << 10) +#define KEMPLD_SPEC 0x06 +#define KEMPLD_SPEC_GET_MINOR(x) (x & 0x0f) +#define KEMPLD_SPEC_GET_MAJOR(x) ((x >> 4) & 0x0f) +#define KEMPLD_IRQ_GPIO 0x35 +#define KEMPLD_IRQ_I2C 0x36 +#define KEMPLD_CFG 0x37 +#define KEMPLD_CFG_GPIO_I2C_MUX (1 << 0) +#define KEMPLD_CFG_BIOS_WP (1 << 7) + +#define KEMPLD_CLK 33333333 + +#define KEMPLD_TYPE_RELEASE 0x0 +#define KEMPLD_TYPE_DEBUG 0x1 +#define KEMPLD_TYPE_CUSTOM 0x2 + +/** + * struct kempld_info - PLD device information structure + * @major: PLD major revision + * @minor: PLD minor revision + * @buildnr: PLD build number + * @number: PLD board specific index + * @type: PLD type + * @spec_major: PLD FW specification major revision + * @spec_minor: PLD FW specification minor revision + */ +struct kempld_info { + unsigned int major; + unsigned int minor; + unsigned int buildnr; + unsigned int number; + unsigned int type; + unsigned int spec_major; + unsigned int spec_minor; +}; + +/** + * struct kempld_device_data - Internal representation of the PLD device + * @io_base: Pointer to the IO memory + * @io_index: Pointer to the IO index register + * @io_data: Pointer to the IO data register + * @pld_clock: PLD clock frequency + * @feature_mask: PLD feature mask + * @dev: Pointer to kernel device structure + * @info: KEMPLD info structure + * @lock: PLD mutex + */ +struct kempld_device_data { + void __iomem *io_base; + void __iomem *io_index; + void __iomem *io_data; + u32 pld_clock; + u32 feature_mask; + struct device *dev; + struct kempld_info info; + struct mutex lock; +}; + +/** + * struct kempld_platform_data - PLD hardware configuration structure + * @pld_clock: PLD clock frequency + * @gpio_base GPIO base pin number + * @ioresource: IO addresses of the PLD + * @get_mutex: PLD specific get_mutex callback + * @release_mutex: PLD specific release_mutex callback + * @get_info: PLD specific get_info callback + * @register_cells: PLD specific register_cells callback + */ +struct kempld_platform_data { + u32 pld_clock; + int gpio_base; + struct resource *ioresource; + void (*get_hardware_mutex) (struct kempld_device_data *); + void (*release_hardware_mutex) (struct kempld_device_data *); + int (*get_info) (struct kempld_device_data *); + int (*register_cells) (struct kempld_device_data *); +}; + +extern void kempld_get_mutex(struct kempld_device_data *pld); +extern void kempld_release_mutex(struct kempld_device_data *pld); +extern u8 kempld_read8(struct kempld_device_data *pld, u8 index); +extern void kempld_write8(struct kempld_device_data *pld, u8 index, u8 data); +extern u16 
kempld_read16(struct kempld_device_data *pld, u8 index); +extern void kempld_write16(struct kempld_device_data *pld, u8 index, u16 data); +extern u32 kempld_read32(struct kempld_device_data *pld, u8 index); +extern void kempld_write32(struct kempld_device_data *pld, u8 index, u32 data); + +#endif /* _LINUX_MFD_KEMPLD_H_ */ diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h index effa5d3..84844e0 100644 --- a/include/linux/mfd/max8998-private.h +++ b/include/linux/mfd/max8998-private.h @@ -132,9 +132,12 @@ enum { #define MAX8998_ENRAMP (1 << 4) +struct irq_domain; + /** * struct max8998_dev - max8998 master device for sub-drivers * @dev: master device of the chip (can be used to access platform data) + * @pdata: platform data for the driver and subdrivers * @i2c: i2c client private data for regulator * @rtc: i2c client private data for rtc * @iolock: mutex for serializing io access @@ -148,12 +151,14 @@ enum { */ struct max8998_dev { struct device *dev; + struct max8998_platform_data *pdata; struct i2c_client *i2c; struct i2c_client *rtc; struct mutex iolock; struct mutex irqlock; - int irq_base; + unsigned int irq_base; + struct irq_domain *irq_domain; int irq; int ono; u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS]; diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h index 6823548..e3956a6 100644 --- a/include/linux/mfd/max8998.h +++ b/include/linux/mfd/max8998.h @@ -58,10 +58,12 @@ enum { * max8998_regulator_data - regulator data * @id: regulator id * @initdata: regulator init data (contraints, supplies, ...) + * @reg_node: DT node of regulator (unused on non-DT platforms) */ struct max8998_regulator_data { int id; struct regulator_init_data *initdata; + struct device_node *reg_node; }; /** @@ -73,12 +75,8 @@ struct max8998_regulator_data { * @buck_voltage_lock: Do NOT change the values of the following six * registers set by buck?_voltage?. The voltage of BUCK1/2 cannot * be other than the preset values. - * @buck1_voltage1: BUCK1 DVS mode 1 voltage register - * @buck1_voltage2: BUCK1 DVS mode 2 voltage register - * @buck1_voltage3: BUCK1 DVS mode 3 voltage register - * @buck1_voltage4: BUCK1 DVS mode 4 voltage register - * @buck2_voltage1: BUCK2 DVS mode 1 voltage register - * @buck2_voltage2: BUCK2 DVS mode 2 voltage register + * @buck1_voltage: BUCK1 DVS mode 1 voltage registers + * @buck2_voltage: BUCK2 DVS mode 2 voltage registers * @buck1_set1: BUCK1 gpio pin 1 to set output voltage * @buck1_set2: BUCK1 gpio pin 2 to set output voltage * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2 @@ -100,15 +98,11 @@ struct max8998_regulator_data { struct max8998_platform_data { struct max8998_regulator_data *regulators; int num_regulators; - int irq_base; + unsigned int irq_base; int ono; bool buck_voltage_lock; - int buck1_voltage1; - int buck1_voltage2; - int buck1_voltage3; - int buck1_voltage4; - int buck2_voltage1; - int buck2_voltage2; + int buck1_voltage[4]; + int buck2_voltage[2]; int buck1_set1; int buck1_set2; int buck1_default_idx; diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 9b81b2b..1a8dd7a 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -34,6 +34,19 @@ ((a) == PALMAS_CHIP_ID)) #define is_palmas_charger(a) ((a) == PALMAS_CHIP_CHARGER_ID) +/** + * Palmas PMIC feature types + * + * PALMAS_PMIC_FEATURE_SMPS10_BOOST - used when the PMIC provides SMPS10_BOOST + * regulator. 
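A short usage sketch for the kempld accessors declared above (not part of this patch; the function name is invented), with the access bracketed by the PLD hardware mutex helpers:

#include <linux/mfd/kempld.h>

/* Illustrative only: read the 16-bit PLD version word. */
static u16 example_read_pld_version(struct kempld_device_data *pld)
{
	u16 version;

	kempld_get_mutex(pld);
	version = kempld_read16(pld, KEMPLD_VERSION);
	kempld_release_mutex(pld);

	return version;
}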
+ * + * PALMAS_PMIC_HAS(b, f) - macro to check if a bandgap device is capable of a + * specific feature (above) or not. Return non-zero, if yes. + */ +#define PALMAS_PMIC_FEATURE_SMPS10_BOOST BIT(0) +#define PALMAS_PMIC_HAS(b, f) \ + ((b)->features & PALMAS_PMIC_FEATURE_ ## f) + struct palmas_pmic; struct palmas_gpadc; struct palmas_resource; @@ -54,6 +67,7 @@ struct palmas { /* Stored chip id */ int id; + unsigned int features; /* IRQ Data */ int irq; u32 irq_mask; diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h index 86bc635..7a9f708 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/mfd/rtsx_pci.h @@ -575,6 +575,7 @@ #define CARD_PWR_CTL 0xFD50 #define CARD_CLK_SWITCH 0xFD51 +#define RTL8411B_PACKAGE_MODE 0xFD51 #define CARD_SHARE_MODE 0xFD52 #define CARD_DRIVE_SEL 0xFD53 #define CARD_STOP 0xFD54 diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index f0f4de3..378ae8a 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -14,8 +14,6 @@ #ifndef __LINUX_MFD_SEC_CORE_H #define __LINUX_MFD_SEC_CORE_H -#define NUM_IRQ_REGS 4 - enum sec_device_type { S5M8751X, S5M8763X, @@ -44,8 +42,6 @@ struct sec_pmic_dev { struct regmap *regmap; struct i2c_client *i2c; struct i2c_client *rtc; - struct mutex iolock; - struct mutex irqlock; int device_type; int irq_base; @@ -53,8 +49,6 @@ struct sec_pmic_dev { struct regmap_irq_chip_data *irq_data; int ono; - u8 irq_masks_cur[NUM_IRQ_REGS]; - u8 irq_masks_cache[NUM_IRQ_REGS]; int type; bool wakeup; }; diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h index ad2252f..4e94dc6 100644 --- a/include/linux/mfd/samsung/s2mps11.h +++ b/include/linux/mfd/samsung/s2mps11.h @@ -189,6 +189,7 @@ enum s2mps11_regulators { #define S2MPS11_ENABLE_SHIFT 0x06 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1) #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1) +#define S2MPS11_RAMP_DELAY 25000 /* uV/us */ #define S2MPS11_PMIC_EN_SHIFT 6 #define S2MPS11_REGULATOR_MAX (S2MPS11_REG_MAX - 3) diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h index dab34a1..b6bdcd6 100644 --- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h +++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h @@ -103,15 +103,15 @@ #define IMX6Q_GPR1_EXC_MON_MASK BIT(22) #define IMX6Q_GPR1_EXC_MON_OKAY 0x0 #define IMX6Q_GPR1_EXC_MON_SLVE BIT(22) -#define IMX6Q_GPR1_MIPI_IPU2_SEL_MASK BIT(21) -#define IMX6Q_GPR1_MIPI_IPU2_SEL_GASKET 0x0 -#define IMX6Q_GPR1_MIPI_IPU2_SEL_IOMUX BIT(21) -#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(20) -#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0 -#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(20) -#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(19) +#define IMX6Q_GPR1_ENET_CLK_SEL_MASK BIT(21) +#define IMX6Q_GPR1_ENET_CLK_SEL_PAD 0 +#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP BIT(21) +#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(20) #define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0 -#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(19) +#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(20) +#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(19) +#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0 +#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(19) #define IMX6Q_GPR1_PCIE_TEST_PD BIT(18) #define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17) #define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0 @@ -279,41 +279,88 @@ #define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29) #define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28) #define IMX6Q_GPR13_ENET_STOP_REQ BIT(27) -#define 
IMX6Q_GPR13_SATA_PHY_8_MASK (0x7 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_0_5_DB (0x0 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_1_0_DB (0x1 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_1_5_DB (0x2 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_2_0_DB (0x3 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_2_5_DB (0x4 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_3_0_DB (0x5 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_3_5_DB (0x6 << 24) -#define IMX6Q_GPR13_SATA_PHY_8_4_0_DB (0x7 << 24) -#define IMX6Q_GPR13_SATA_PHY_7_MASK (0x1f << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA1I (0x10 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA1M (0x10 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA1X (0x1a << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA2I (0x12 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA2M (0x12 << 19) -#define IMX6Q_GPR13_SATA_PHY_7_SATA2X (0x1a << 19) -#define IMX6Q_GPR13_SATA_PHY_6_MASK (0x7 << 16) -#define IMX6Q_GPR13_SATA_SPEED_MASK BIT(15) -#define IMX6Q_GPR13_SATA_SPEED_1P5G 0x0 -#define IMX6Q_GPR13_SATA_SPEED_3P0G BIT(15) -#define IMX6Q_GPR13_SATA_PHY_5 BIT(14) -#define IMX6Q_GPR13_SATA_PHY_4_MASK (0x7 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_16_16 (0x0 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_14_16 (0x1 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_12_16 (0x2 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_10_16 (0x3 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_9_16 (0x4 << 11) -#define IMX6Q_GPR13_SATA_PHY_4_8_16 (0x5 << 11) -#define IMX6Q_GPR13_SATA_PHY_3_MASK (0xf << 7) -#define IMX6Q_GPR13_SATA_PHY_3_OFF 0x7 -#define IMX6Q_GPR13_SATA_PHY_2_MASK (0x1f << 2) -#define IMX6Q_GPR13_SATA_PHY_2_OFF 0x2 -#define IMX6Q_GPR13_SATA_PHY_1_MASK (0x3 << 0) -#define IMX6Q_GPR13_SATA_PHY_1_FAST (0x0 << 0) -#define IMX6Q_GPR13_SATA_PHY_1_MED (0x1 << 0) -#define IMX6Q_GPR13_SATA_PHY_1_SLOW (0x2 << 0) - +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK (0x7 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB (0x0 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB (0x1 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB (0x2 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB (0x3 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB (0x4 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB (0x5 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB (0x6 << 24) +#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB (0x7 << 24) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK (0x1f << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I (0x10 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M (0x10 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X (0x1a << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I (0x12 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M (0x12 << 19) +#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X (0x1a << 19) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK (0x7 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F (0x0 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F (0x1 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F (0x2 << 16) +#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F (0x3 << 16) +#define IMX6Q_GPR13_SATA_SPD_MODE_MASK BIT(15) +#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G 0x0 +#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G BIT(15) +#define IMX6Q_GPR13_SATA_MPLL_SS_EN BIT(14) +#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK (0x7 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16 (0x0 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16 (0x1 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16 (0x2 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16 (0x3 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16 (0x4 << 11) +#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16 (0x5 << 11) +#define IMX6Q_GPR13_SATA_TX_BOOST_MASK (0xf << 7) +#define 
IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB (0x0 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB (0x1 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB (0x2 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB (0x3 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB (0x4 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB (0x5 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB (0x6 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB (0x7 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB (0x8 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB (0x9 << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB (0xa << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB (0xb << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB (0xc << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB (0xd << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB (0xe << 7) +#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB (0xf << 7) +#define IMX6Q_GPR13_SATA_TX_LVL_MASK (0x1f << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V (0x00 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V (0x01 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V (0x02 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V (0x03 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V (0x04 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V (0x05 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V (0x06 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V (0x07 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V (0x08 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V (0x09 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V (0x0a << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V (0x0b << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V (0x0c << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V (0x0d << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V (0x0e << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V (0x0f << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V (0x10 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V (0x11 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V (0x12 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V (0x13 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V (0x14 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V (0x15 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V (0x16 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V (0x17 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V (0x18 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V (0x19 << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V (0x1a << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V (0x1b << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V (0x1c << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V (0x1d << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V (0x1e << 2) +#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2) +#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1) +#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0) #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index c79ad5d..8d73fe2 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -30,8 +30,8 @@ #define REG_IDLECONFIG 0x058 #define REG_CHARGECONFIG 0x05C #define REG_CHARGEDELAY 0x060 -#define REG_STEPCONFIG(n) (0x64 + ((n - 1) * 8)) -#define REG_STEPDELAY(n) (0x68 + ((n - 1) * 8)) +#define REG_STEPCONFIG(n) (0x64 + ((n) * 8)) +#define REG_STEPDELAY(n) (0x68 + ((n) * 8)) #define REG_FIFO0CNT 0xE4 #define REG_FIFO0THR 0xE8 #define REG_FIFO1CNT 0xF0 @@ -46,8 +46,6 @@ /* Step Enable */ #define STEPENB_MASK (0x1FFFF << 0) #define STEPENB(val) ((val) << 0) -#define STPENB_STEPENB STEPENB(0x1FFFF) -#define STPENB_STEPENB_TC STEPENB(0x1FFF) /* IRQ enable */ #define IRQENB_HW_PEN BIT(0) @@ 
-73,8 +71,6 @@ #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) #define STEPCONFIG_INP_MASK (0xF << 19) #define STEPCONFIG_INP(val) ((val) << 19) -#define STEPCONFIG_INP_AN2 STEPCONFIG_INP(2) -#define STEPCONFIG_INP_AN3 STEPCONFIG_INP(3) #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) #define STEPCONFIG_FIFO1 BIT(26) @@ -96,7 +92,6 @@ #define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1) #define STEPCHARGE_INP_MASK (0xF << 19) #define STEPCHARGE_INP(val) ((val) << 19) -#define STEPCHARGE_INP_AN1 STEPCHARGE_INP(1) #define STEPCHARGE_RFM_MASK (3 << 23) #define STEPCHARGE_RFM(val) ((val) << 23) #define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1) @@ -125,22 +120,17 @@ #define TSCADC_CELLS 2 -enum tscadc_cells { - TSC_CELL, - ADC_CELL, -}; - -struct mfd_tscadc_board { - struct tsc_data *tsc_init; - struct adc_data *adc_init; -}; - struct ti_tscadc_dev { struct device *dev; struct regmap *regmap_tscadc; void __iomem *tscadc_base; int irq; + int used_cells; /* 1-2 */ + int tsc_cell; /* -1 if not used */ + int adc_cell; /* -1 if not used */ struct mfd_cell cells[TSCADC_CELLS]; + u32 reg_se_cache; + spinlock_t reg_lock; /* tsc device */ struct titsc *tsc; @@ -149,4 +139,15 @@ struct ti_tscadc_dev { struct adc_device *adc; }; +static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p) +{ + struct ti_tscadc_dev **tscadc_dev = p->dev.platform_data; + + return *tscadc_dev; +} + +void am335x_tsc_se_update(struct ti_tscadc_dev *tsadc); +void am335x_tsc_se_set(struct ti_tscadc_dev *tsadc, u32 val); +void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val); + #endif diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 99bf3e6..ce35113 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h @@ -81,10 +81,15 @@ int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state); +struct dma_chan; + struct tmio_mmc_dma { void *chan_priv_tx; void *chan_priv_rx; + int slave_id_tx; + int slave_id_rx; int alignment_shift; + bool (*filter)(struct dma_chan *chan, void *arg); }; struct tmio_mmc_host; diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h index c923e486..c2ae569 100644 --- a/include/linux/mfd/tps6507x.h +++ b/include/linux/mfd/tps6507x.h @@ -163,7 +163,6 @@ struct tps6507x_dev { /* Client devices */ struct tps6507x_pmic *pmic; - struct tps6507x_ts *ts; }; #endif /* __LINUX_MFD_TPS6507X_H */ diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index ae5c249..40854ac 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h @@ -29,6 +29,7 @@ enum wm8994_type { struct regulator_dev; struct regulator_bulk_data; +struct irq_domain; #define WM8994_NUM_GPIO_REGS 11 #define WM8994_NUM_LDO_REGS 2 @@ -73,6 +74,7 @@ struct wm8994 { int irq; struct regmap_irq_chip_data *irq_data; + struct irq_domain *edge_irq; /* Used over suspend/resume */ bool suspended; diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index b5046f6..90c6052 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -228,6 +228,11 @@ struct wm8994_pdata { * lines is mastered. 
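The new ti_tscadc_dev_get() helper above gives the TSC and ADC child cells access to their shared parent. A hedged sketch of how a child's probe might use it together with the step-enable helpers (the function name and step mask are invented):

#include <linux/platform_device.h>
#include <linux/mfd/ti_am335x_tscadc.h>

/* Illustrative only: enable two hypothetical steps from a child cell. */
static int example_child_probe(struct platform_device *pdev)
{
	struct ti_tscadc_dev *tscadc = ti_tscadc_dev_get(pdev);

	am335x_tsc_se_set(tscadc, STEPENB(0x3));
	return 0;
}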
*/ int max_channels_clocked[WM8994_NUM_AIF]; + + /** + * GPIO for the IRQ pin if host only supports edge triggering + */ + int irq_gpio; }; #endif diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index adf6e06..bb1c809 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h @@ -111,6 +111,7 @@ enum { MLX4_CMD_INIT2INIT_QP = 0x2d, MLX4_CMD_SUSPEND_QP = 0x32, MLX4_CMD_UNSUSPEND_QP = 0x33, + MLX4_CMD_UPDATE_QP = 0x61, /* special QP and management commands */ MLX4_CMD_CONF_SPECIAL_QP = 0x23, MLX4_CMD_MAD_IFC = 0x24, @@ -237,7 +238,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac); int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos); int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); - +int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); #define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8) diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index a51b013..52c23a8 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -157,7 +157,8 @@ enum { MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN = 1LL << 4, MLX4_DEV_CAP_FLAG2_TS = 1LL << 5, MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6, - MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7 + MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7, + MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8 }; enum { diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 352eec9..262deac 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -152,6 +152,8 @@ enum { /* fl */ }; enum { /* vlan_control */ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6, + MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */ + MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED = 1 << 4, MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2, MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0 @@ -206,6 +208,40 @@ struct mlx4_qp_context { u32 reserved5[10]; }; +struct mlx4_update_qp_context { + __be64 qp_mask; + __be64 primary_addr_path_mask; + __be64 secondary_addr_path_mask; + u64 reserved1; + struct mlx4_qp_context qp_context; + u64 reserved2[58]; +}; + +enum { + MLX4_UPD_QP_MASK_PM_STATE = 32, + MLX4_UPD_QP_MASK_VSD = 33, +}; + +enum { + MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32, + MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32, + MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32, + MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32, + MLX4_UPD_QP_PATH_MASK_CV = 4 + 32, + MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32, + MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32, + MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32, + MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32, + MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32, + MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32, +}; + enum { /* param3 */ MLX4_STRIP_VLAN = 1 << 30 }; diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h new file mode 100644 index 0000000..2826a4b --- /dev/null +++ b/include/linux/mlx5/cmd.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_CMD_H +#define MLX5_CMD_H + +#include <linux/types.h> + +struct manage_pages_layout { + u64 ptr; + u32 reserved; + u16 num_entries; + u16 func_id; +}; + + +struct mlx5_cmd_alloc_uar_imm_out { + u32 rsvd[3]; + u32 uarn; +}; + +#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h new file mode 100644 index 0000000..3db67f7 --- /dev/null +++ b/include/linux/mlx5/cq.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef MLX5_CORE_CQ_H +#define MLX5_CORE_CQ_H + +#include <rdma/ib_verbs.h> +#include <linux/mlx5/driver.h> + + +struct mlx5_core_cq { + u32 cqn; + int cqe_sz; + __be32 *set_ci_db; + __be32 *arm_db; + atomic_t refcount; + struct completion free; + unsigned vector; + int irqn; + void (*comp) (struct mlx5_core_cq *); + void (*event) (struct mlx5_core_cq *, enum mlx5_event); + struct mlx5_uar *uar; + u32 cons_index; + unsigned arm_sn; + struct mlx5_rsc_debug *dbg; + int pid; +}; + + +enum { + MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01, + MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02, + MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04, + MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05, + MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06, + MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10, + MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11, + MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13, + MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14, + MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15, + MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22, +}; + +enum { + MLX5_CQE_OWNER_MASK = 1, + MLX5_CQE_REQ = 0, + MLX5_CQE_RESP_WR_IMM = 1, + MLX5_CQE_RESP_SEND = 2, + MLX5_CQE_RESP_SEND_IMM = 3, + MLX5_CQE_RESP_SEND_INV = 4, + MLX5_CQE_RESIZE_CQ = 0xff, /* TBD */ + MLX5_CQE_REQ_ERR = 13, + MLX5_CQE_RESP_ERR = 14, +}; + +enum { + MLX5_CQ_MODIFY_RESEIZE = 0, + MLX5_CQ_MODIFY_MODER = 1, + MLX5_CQ_MODIFY_MAPPING = 2, +}; + +struct mlx5_cq_modify_params { + int type; + union { + struct { + u32 page_offset; + u8 log_cq_size; + } resize; + + struct { + } moder; + + struct { + } mapping; + } params; +}; + +enum { + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, +}; + +static inline int cqe_sz_to_mlx_sz(u8 size) +{ + return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; +} + +static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) +{ + *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); +} + +enum { + MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24, + MLX5_CQ_DB_REQ_NOT = 0 << 24 +}; + +static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd, + void __iomem *uar_page, + spinlock_t *doorbell_lock) +{ + __be32 doorbell[2]; + u32 sn; + u32 ci; + + sn = cq->arm_sn & 3; + ci = cq->cons_index & 0xffffff; + + *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci); + doorbell[1] = cpu_to_be32(cq->cqn); + + mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock); +} + +int mlx5_init_cq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev); +int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_create_cq_mbox_in *in, int inlen); +int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); +int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + struct mlx5_query_cq_mbox_out *out); +int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + int type, struct mlx5_cq_modify_params *params); +int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); +void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); + +#endif /* MLX5_CORE_CQ_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h new file mode 100644 index 0000000..737685e --- /dev/null +++ b/include/linux/mlx5/device.h @@ -0,0 +1,913 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. 
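mlx5_cq_set_ci() and mlx5_cq_arm() above are the consumer-side CQ doorbells. A sketch of how a polling loop might finish (illustrative only; 'uar_page' and 'db_lock' stand in for state a real driver would own, and this is not the actual comp callback):

#include <linux/spinlock.h>
#include <linux/mlx5/cq.h>

static void example_finish_poll(struct mlx5_core_cq *cq,
				void __iomem *uar_page,
				spinlock_t *db_lock)
{
	/* ... CQEs were consumed and cq->cons_index advanced above ... */

	/* Publish the consumer index, then request the next event. */
	mlx5_cq_set_ci(cq);
	mlx5_cq_arm(cq, MLX5_CQ_DB_REQ_NOT, uar_page, db_lock);
}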
All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DEVICE_H +#define MLX5_DEVICE_H + +#include <linux/types.h> +#include <rdma/ib_verbs.h> + +#if defined(__LITTLE_ENDIAN) +#define MLX5_SET_HOST_ENDIANNESS 0 +#elif defined(__BIG_ENDIAN) +#define MLX5_SET_HOST_ENDIANNESS 0x80 +#else +#error Host endianness not defined +#endif + +enum { + MLX5_MAX_COMMANDS = 32, + MLX5_CMD_DATA_BLOCK_SIZE = 512, + MLX5_PCI_CMD_XPORT = 7, +}; + +enum { + MLX5_EXTENDED_UD_AV = 0x80000000, +}; + +enum { + MLX5_CQ_STATE_ARMED = 9, + MLX5_CQ_STATE_ALWAYS_ARMED = 0xb, + MLX5_CQ_STATE_FIRED = 0xa, +}; + +enum { + MLX5_STAT_RATE_OFFSET = 5, +}; + +enum { + MLX5_INLINE_SEG = 0x80000000, +}; + +enum { + MLX5_PERM_LOCAL_READ = 1 << 2, + MLX5_PERM_LOCAL_WRITE = 1 << 3, + MLX5_PERM_REMOTE_READ = 1 << 4, + MLX5_PERM_REMOTE_WRITE = 1 << 5, + MLX5_PERM_ATOMIC = 1 << 6, + MLX5_PERM_UMR_EN = 1 << 7, +}; + +enum { + MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0, + MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2, + MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, + MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, + MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, +}; + +enum { + MLX5_ACCESS_MODE_PA = 0, + MLX5_ACCESS_MODE_MTT = 1, + MLX5_ACCESS_MODE_KLM = 2 +}; + +enum { + MLX5_MKEY_REMOTE_INVAL = 1 << 24, + MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, + MLX5_MKEY_BSF_EN = 1 << 30, + MLX5_MKEY_LEN64 = 1 << 31, +}; + +enum { + MLX5_EN_RD = (u64)1, + MLX5_EN_WR = (u64)2 +}; + +enum { + MLX5_BF_REGS_PER_PAGE = 4, + MLX5_MAX_UAR_PAGES = 1 << 8, + MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_BF_REGS_PER_PAGE, +}; + +enum { + MLX5_MKEY_MASK_LEN = 1ull << 0, + MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, + MLX5_MKEY_MASK_START_ADDR = 1ull << 6, + MLX5_MKEY_MASK_PD = 1ull << 7, + MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, + MLX5_MKEY_MASK_BSF_EN = 1ull << 12, + MLX5_MKEY_MASK_KEY = 1ull << 13, + MLX5_MKEY_MASK_QPN = 1ull << 14, + MLX5_MKEY_MASK_LR = 1ull << 17, + MLX5_MKEY_MASK_LW = 1ull << 18, + MLX5_MKEY_MASK_RR = 1ull << 19, + MLX5_MKEY_MASK_RW = 1ull << 20, + MLX5_MKEY_MASK_A = 1ull << 21, + MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, + MLX5_MKEY_MASK_FREE = 1ull << 29, +}; + +enum mlx5_event { + MLX5_EVENT_TYPE_COMP = 0x0, + + MLX5_EVENT_TYPE_PATH_MIG = 0x01, + MLX5_EVENT_TYPE_COMM_EST = 0x02, + 
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, + MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, + MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, + + MLX5_EVENT_TYPE_CQ_ERROR = 0x04, + MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + + MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, + MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, + MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, + MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, + + MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, + MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, + + MLX5_EVENT_TYPE_CMD = 0x0a, + MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, +}; + +enum { + MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, + MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, + MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, + MLX5_PORT_CHANGE_SUBTYPE_LID = 6, + MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, + MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, + MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, +}; + +enum { + MLX5_DEV_CAP_FLAG_RC = 1LL << 0, + MLX5_DEV_CAP_FLAG_UC = 1LL << 1, + MLX5_DEV_CAP_FLAG_UD = 1LL << 2, + MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, + MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6, + MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + MLX5_DEV_CAP_FLAG_APM = 1LL << 17, + MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, + MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, + MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + MLX5_DEV_CAP_FLAG_DCT = 1LL << 41, + MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, +}; + +enum { + MLX5_OPCODE_NOP = 0x00, + MLX5_OPCODE_SEND_INVAL = 0x01, + MLX5_OPCODE_RDMA_WRITE = 0x08, + MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, + MLX5_OPCODE_SEND = 0x0a, + MLX5_OPCODE_SEND_IMM = 0x0b, + MLX5_OPCODE_RDMA_READ = 0x10, + MLX5_OPCODE_ATOMIC_CS = 0x11, + MLX5_OPCODE_ATOMIC_FA = 0x12, + MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, + MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, + MLX5_OPCODE_BIND_MW = 0x18, + MLX5_OPCODE_CONFIG_CMD = 0x1f, + + MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, + MLX5_RECV_OPCODE_SEND = 0x01, + MLX5_RECV_OPCODE_SEND_IMM = 0x02, + MLX5_RECV_OPCODE_SEND_INVAL = 0x03, + + MLX5_CQE_OPCODE_ERROR = 0x1e, + MLX5_CQE_OPCODE_RESIZE = 0x16, + + MLX5_OPCODE_SET_PSV = 0x20, + MLX5_OPCODE_GET_PSV = 0x21, + MLX5_OPCODE_CHECK_PSV = 0x22, + MLX5_OPCODE_RGET_PSV = 0x26, + MLX5_OPCODE_RCHECK_PSV = 0x27, + + MLX5_OPCODE_UMR = 0x25, + +}; + +enum { + MLX5_SET_PORT_RESET_QKEY = 0, + MLX5_SET_PORT_GUID0 = 16, + MLX5_SET_PORT_NODE_GUID = 17, + MLX5_SET_PORT_SYS_GUID = 18, + MLX5_SET_PORT_GID_TABLE = 19, + MLX5_SET_PORT_PKEY_TABLE = 20, +}; + +enum { + MLX5_MAX_PAGE_SHIFT = 31 +}; + +struct mlx5_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 opmod; +}; + +struct mlx5_outbox_hdr { + u8 status; + u8 rsvd[3]; + __be32 syndrome; +}; + +struct mlx5_cmd_query_adapter_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cmd_query_adapter_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[24]; + u8 intapin; + u8 rsvd1[13]; + __be16 vsd_vendor_id; + u8 vsd[208]; + u8 vsd_psid[16]; +}; + +struct mlx5_hca_cap { + u8 rsvd1[16]; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + u8 rsvd2; + u8 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 rsvd5; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 rsvd7; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + 
u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 log_max_ra_req_qp; + u8 rsvd10; + u8 log_max_ra_res_qp; + u8 rsvd11[4]; + __be16 max_qp_count; + __be16 rsvd12; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 rsvd14; + u8 num_ports; + u8 log_max_msg; + u8 rsvd15[3]; + __be16 stat_rate_support; + u8 rsvd16[2]; + __be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + u8 rsvd19[4]; + __be16 max_desc_sz_sq; + u8 rsvd20[2]; + __be16 max_desc_sz_rq; + u8 rsvd21[2]; + __be16 max_desc_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[42]; + __be16 log_uar_page_sz; + u8 rsvd27[28]; + u8 log_msx_atomic_size_qp; + u8 rsvd28[2]; + u8 log_msx_atomic_size_dc; + u8 rsvd29[76]; +}; + + +struct mlx5_cmd_query_hca_cap_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + + +struct mlx5_cmd_query_hca_cap_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; + struct mlx5_hca_cap hca_cap; +}; + + +struct mlx5_cmd_set_hca_cap_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; + struct mlx5_hca_cap hca_cap; +}; + + +struct mlx5_cmd_set_hca_cap_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + + +struct mlx5_cmd_init_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 profile; + u8 rsvd1[4]; +}; + +struct mlx5_cmd_init_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cmd_teardown_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 profile; + u8 rsvd1[4]; +}; + +struct mlx5_cmd_teardown_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cmd_layout { + u8 type; + u8 rsvd0[3]; + __be32 inlen; + __be64 in_ptr; + __be32 in[4]; + __be32 out[4]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 rsvd1; + u8 status_own; +}; + + +struct health_buffer { + __be32 assert_var[5]; + __be32 rsvd0[3]; + __be32 assert_exit_ptr; + __be32 assert_callra; + __be32 rsvd1[2]; + __be32 fw_ver; + __be32 hw_id; + __be32 rsvd2; + u8 irisc_index; + u8 synd; + __be16 ext_sync; +}; + +struct mlx5_init_seg { + __be32 fw_rev; + __be32 cmdif_rev_fw_sub; + __be32 rsvd0[2]; + __be32 cmdq_addr_h; + __be32 cmdq_addr_l_sz; + __be32 cmd_dbell; + __be32 rsvd1[121]; + struct health_buffer health; + __be32 rsvd2[884]; + __be32 health_counter; + __be32 rsvd3[1023]; + __be64 ieee1588_clk; + __be32 ieee1588_clk_type; + __be32 clr_intx; +}; + +struct mlx5_eqe_comp { + __be32 reserved[6]; + __be32 cqn; +}; + +struct mlx5_eqe_qp_srq { + __be32 reserved[6]; + __be32 qp_srq_n; +}; + +struct mlx5_eqe_cq_err { + __be32 cqn; + u8 reserved1[7]; + u8 syndrome; +}; + +struct mlx5_eqe_dropped_packet { +}; + +struct mlx5_eqe_port_state { + u8 reserved0[8]; + u8 port; +}; + +struct mlx5_eqe_gpio { + __be32 reserved0[2]; + __be64 gpio_event; +}; + +struct mlx5_eqe_congestion { + u8 type; + u8 rsvd0; + u8 congestion_level; +}; + +struct mlx5_eqe_stall_vl { + u8 rsvd0[3]; + u8 port_vl; +}; + +struct mlx5_eqe_cmd { + __be32 vector; + __be32 rsvd[6]; +}; + +struct mlx5_eqe_page_req { + u8 rsvd0[2]; + __be16 func_id; + u8 rsvd1[2]; + __be16 num_pages; + __be32 rsvd2[5]; +}; + +union ev_data { + __be32 raw[7]; + struct mlx5_eqe_cmd cmd; + struct mlx5_eqe_comp comp; + struct mlx5_eqe_qp_srq qp_srq; + struct mlx5_eqe_cq_err cq_err; + struct mlx5_eqe_dropped_packet dp; + struct mlx5_eqe_port_state port; + struct mlx5_eqe_gpio gpio; + struct mlx5_eqe_congestion cong; + struct mlx5_eqe_stall_vl 
stall_vl; + struct mlx5_eqe_page_req req_pages; +} __packed; + +struct mlx5_eqe { + u8 rsvd0; + u8 type; + u8 rsvd1; + u8 sub_type; + __be32 rsvd2[7]; + union ev_data data; + __be16 rsvd3; + u8 signature; + u8 owner; +} __packed; + +struct mlx5_cmd_prot_block { + u8 data[MLX5_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 rsvd1; + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +struct mlx5_err_cqe { + u8 rsvd0[32]; + __be32 srqn; + u8 rsvd1[18]; + u8 vendor_err_synd; + u8 syndrome; + __be32 s_wqe_opcode_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +}; + +struct mlx5_cqe64 { + u8 rsvd0[17]; + u8 ml_path; + u8 rsvd20[4]; + __be16 slid; + __be32 flags_rqpn; + u8 rsvd28[4]; + __be32 srqn; + __be32 imm_inval_pkey; + u8 rsvd40[4]; + __be32 byte_cnt; + __be64 timestamp; + __be32 sop_drop_qpn; + __be16 wqe_counter; + u8 signature; + u8 op_own; +}; + +struct mlx5_wqe_srq_next_seg { + u8 rsvd0[2]; + __be16 next_wqe_index; + u8 signature; + u8 rsvd1[11]; +}; + +union mlx5_ext_cqe { + struct ib_grh grh; + u8 inl[64]; +}; + +struct mlx5_cqe128 { + union mlx5_ext_cqe inl_grh; + struct mlx5_cqe64 cqe64; +}; + +struct mlx5_srq_ctx { + u8 state_log_sz; + u8 rsvd0[3]; + __be32 flags_xrcd; + __be32 pgoff_cqn; + u8 rsvd1[4]; + u8 log_pg_sz; + u8 rsvd2[7]; + __be32 pd; + __be16 lwm; + __be16 wqe_cnt; + u8 rsvd3[8]; + __be64 db_record; +}; + +struct mlx5_create_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_srqn; + u8 rsvd0[4]; + struct mlx5_srq_ctx ctx; + u8 rsvd1[208]; + __be64 pas[0]; +}; + +struct mlx5_create_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 srqn; + u8 rsvd[4]; +}; + +struct mlx5_destroy_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 srqn; + u8 rsvd[4]; +}; + +struct mlx5_destroy_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 srqn; + u8 rsvd0[4]; +}; + +struct mlx5_query_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; + struct mlx5_srq_ctx ctx; + u8 rsvd1[32]; + __be64 pas[0]; +}; + +struct mlx5_arm_srq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 srqn; + __be16 rsvd; + __be16 lwm; +}; + +struct mlx5_arm_srq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_cq_context { + u8 status; + u8 cqe_sz_flags; + u8 st; + u8 rsvd3; + u8 rsvd4[6]; + __be16 page_offset; + __be32 log_sz_usr_page; + __be16 cq_period; + __be16 cq_max_count; + __be16 rsvd20; + __be16 c_eqn; + u8 log_pg_sz; + u8 rsvd25[7]; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_counter; + __be32 producer_counter; + u8 rsvd48[8]; + __be64 db_record_addr; +}; + +struct mlx5_create_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_cqn; + u8 rsvdx[4]; + struct mlx5_cq_context ctx; + u8 rsvd6[192]; + __be64 pas[0]; +}; + +struct mlx5_create_cq_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_cq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + +struct mlx5_query_cq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct mlx5_query_cq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; + struct mlx5_cq_context ctx; + u8 rsvd6[16]; + __be64 pas[0]; +}; + +struct mlx5_enable_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_enable_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct 
mlx5_disable_hca_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_disable_hca_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_eq_context { + u8 status; + u8 ec_oi; + u8 st; + u8 rsvd2[7]; + __be16 page_pffset; + __be32 log_sz_usr_page; + u8 rsvd3[7]; + u8 intr; + u8 log_page_size; + u8 rsvd4[15]; + __be32 consumer_counter; + __be32 produser_counter; + u8 rsvd5[16]; +}; + +struct mlx5_create_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[3]; + u8 input_eqn; + u8 rsvd1[4]; + struct mlx5_eq_context ctx; + u8 rsvd2[8]; + __be64 events_mask; + u8 rsvd3[176]; + __be64 pas[0]; +}; + +struct mlx5_create_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[3]; + u8 eq_number; + u8 rsvd1[4]; +}; + +struct mlx5_destroy_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct mlx5_destroy_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_map_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + __be64 mask; + u8 mu; + u8 rsvd0[2]; + u8 eqn; + u8 rsvd1[24]; +}; + +struct mlx5_map_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_eq_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct mlx5_query_eq_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; + struct mlx5_eq_context ctx; +}; + +struct mlx5_mkey_seg { + /* This is a two bit field occupying bits 31-30. + * bit 31 is always 0, + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + */ + u8 status; + u8 pcie_control; + u8 flags; + u8 version; + __be32 qpn_mkey7_0; + u8 rsvd1[4]; + __be32 flags_pd; + __be64 start_addr; + __be64 len; + __be32 bsfs_octo_size; + u8 rsvd2[16]; + __be32 xlt_oct_size; + u8 rsvd3[3]; + u8 log2_page_size; + u8 rsvd4[4]; +}; + +struct mlx5_query_special_ctxs_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_special_ctxs_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 dump_fill_mkey; + __be32 reserved_lkey; +}; + +struct mlx5_create_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_mkey_index; + u8 rsvd0[4]; + struct mlx5_mkey_seg seg; + u8 rsvd1[16]; + __be32 xlat_oct_act_size; + __be32 bsf_coto_act_size; + u8 rsvd2[168]; + __be64 pas[0]; +}; + +struct mlx5_create_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct mlx5_destroy_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct mlx5_destroy_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_query_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 mkey; +}; + +struct mlx5_query_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + __be64 pas[0]; +}; + +struct mlx5_modify_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 mkey; + __be64 pas[0]; +}; + +struct mlx5_modify_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; +}; + +struct mlx5_dump_mkey_mbox_in { + struct mlx5_inbox_hdr hdr; +}; + +struct mlx5_dump_mkey_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 mkey; +}; + +struct mlx5_mad_ifc_mbox_in { + struct mlx5_inbox_hdr hdr; + __be16 remote_lid; + u8 rsvd0; + u8 port; + u8 rsvd1[4]; + u8 data[256]; +}; + +struct mlx5_mad_ifc_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[256]; +}; + +struct mlx5_access_reg_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 register_id; + __be32 arg; + __be32 data[0]; +}; + +struct mlx5_access_reg_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; + 
__be32 data[0]; +}; + +#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum { + MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 +}; + +#endif /* MLX5_DEVICE_H */ diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h new file mode 100644 index 0000000..163a818 --- /dev/null +++ b/include/linux/mlx5/doorbell.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DOORBELL_H +#define MLX5_DOORBELL_H + +#define MLX5_BF_OFFSET 0x800 +#define MLX5_CQ_DOORBELL 0x20 + +#if BITS_PER_LONG == 64 +/* Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. + */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) +#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *)val, dest); +} + +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mlx5_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32) val[0], dest); + __raw_writel((__force u32) val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* MLX5_DOORBELL_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h new file mode 100644 index 0000000..2aa258b --- /dev/null +++ b/include/linux/mlx5/driver.h @@ -0,0 +1,771 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
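A minimal usage sketch for the doorbell helpers defined in doorbell.h above (illustrative only, not taken from the patch): struct my_cq, my_cq_init() and my_ring_cq_doorbell() are invented names, while MLX5_DECLARE_DOORBELL_LOCK, MLX5_INIT_DOORBELL_LOCK, MLX5_GET_DOORBELL_LOCK and mlx5_write64() are the helpers shown above. On 64-bit builds the lock arguments compile away and the two words go out as one atomic 64-bit MMIO store; on 32-bit builds the spinlock serializes the pair of 32-bit writes.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mlx5/doorbell.h>

/* Hypothetical per-CQ state; only the doorbell pieces come from the header. */
struct my_cq {
        void __iomem *db_reg;                   /* mapped doorbell register */
        __be32 db_val[2];                       /* value to post, big-endian */
        MLX5_DECLARE_DOORBELL_LOCK(db_lock);    /* expands to nothing on 64-bit builds */
};

static void my_cq_init(struct my_cq *cq)
{
        MLX5_INIT_DOORBELL_LOCK(&cq->db_lock);  /* no-op when BITS_PER_LONG == 64 */
}

static void my_ring_cq_doorbell(struct my_cq *cq)
{
        /* One atomic 64-bit MMIO write, or two 32-bit writes under the lock. */
        mlx5_write64(cq->db_val, cq->db_reg,
                     MLX5_GET_DOORBELL_LOCK(&cq->db_lock));
}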
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_DRIVER_H +#define MLX5_DRIVER_H + +#include <linux/kernel.h> +#include <linux/completion.h> +#include <linux/pci.h> +#include <linux/spinlock_types.h> +#include <linux/semaphore.h> +#include <linux/vmalloc.h> +#include <linux/radix-tree.h> +#include <linux/mlx5/device.h> +#include <linux/mlx5/doorbell.h> + +enum { + MLX5_BOARD_ID_LEN = 64, + MLX5_MAX_NAME_LEN = 16, +}; + +enum { + /* one minute for the sake of bringup. Generally, commands must always + * complete and we may need to increase this timeout value + */ + MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000, + MLX5_CMD_WQ_MAX_NAME = 32, +}; + +enum { + CMD_OWNER_SW = 0x0, + CMD_OWNER_HW = 0x1, + CMD_STATUS_SUCCESS = 0, +}; + +enum mlx5_sqp_t { + MLX5_SQP_SMI = 0, + MLX5_SQP_GSI = 1, + MLX5_SQP_IEEE_1588 = 2, + MLX5_SQP_SNIFFER = 3, + MLX5_SQP_SYNC_UMR = 4, +}; + +enum { + MLX5_MAX_PORTS = 2, +}; + +enum { + MLX5_EQ_VEC_PAGES = 0, + MLX5_EQ_VEC_CMD = 1, + MLX5_EQ_VEC_ASYNC = 2, + MLX5_EQ_VEC_COMP_BASE, +}; + +enum { + MLX5_MAX_EQ_NAME = 20 +}; + +enum { + MLX5_ATOMIC_MODE_IB_COMP = 1 << 16, + MLX5_ATOMIC_MODE_CX = 2 << 16, + MLX5_ATOMIC_MODE_8B = 3 << 16, + MLX5_ATOMIC_MODE_16B = 4 << 16, + MLX5_ATOMIC_MODE_32B = 5 << 16, + MLX5_ATOMIC_MODE_64B = 6 << 16, + MLX5_ATOMIC_MODE_128B = 7 << 16, + MLX5_ATOMIC_MODE_256B = 8 << 16, +}; + +enum { + MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, + MLX5_CMD_OP_QUERY_ADAPTER = 0x101, + MLX5_CMD_OP_INIT_HCA = 0x102, + MLX5_CMD_OP_TEARDOWN_HCA = 0x103, + MLX5_CMD_OP_ENABLE_HCA = 0x104, + MLX5_CMD_OP_DISABLE_HCA = 0x105, + MLX5_CMD_OP_QUERY_PAGES = 0x107, + MLX5_CMD_OP_MANAGE_PAGES = 0x108, + MLX5_CMD_OP_SET_HCA_CAP = 0x109, + + MLX5_CMD_OP_CREATE_MKEY = 0x200, + MLX5_CMD_OP_QUERY_MKEY = 0x201, + MLX5_CMD_OP_DESTROY_MKEY = 0x202, + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + + MLX5_CMD_OP_CREATE_EQ = 0x301, + MLX5_CMD_OP_DESTROY_EQ = 0x302, + MLX5_CMD_OP_QUERY_EQ = 0x303, + + MLX5_CMD_OP_CREATE_CQ = 0x400, + MLX5_CMD_OP_DESTROY_CQ = 0x401, + MLX5_CMD_OP_QUERY_CQ = 0x402, + MLX5_CMD_OP_MODIFY_CQ = 0x403, + + MLX5_CMD_OP_CREATE_QP = 0x500, + MLX5_CMD_OP_DESTROY_QP = 0x501, + MLX5_CMD_OP_RST2INIT_QP = 0x502, + MLX5_CMD_OP_INIT2RTR_QP = 0x503, + MLX5_CMD_OP_RTR2RTS_QP = 0x504, + MLX5_CMD_OP_RTS2RTS_QP = 0x505, + MLX5_CMD_OP_SQERR2RTS_QP = 0x506, + MLX5_CMD_OP_2ERR_QP = 0x507, + 
MLX5_CMD_OP_RTS2SQD_QP = 0x508, + MLX5_CMD_OP_SQD2RTS_QP = 0x509, + MLX5_CMD_OP_2RST_QP = 0x50a, + MLX5_CMD_OP_QUERY_QP = 0x50b, + MLX5_CMD_OP_CONF_SQP = 0x50c, + MLX5_CMD_OP_MAD_IFC = 0x50d, + MLX5_CMD_OP_INIT2INIT_QP = 0x50e, + MLX5_CMD_OP_SUSPEND_QP = 0x50f, + MLX5_CMD_OP_UNSUSPEND_QP = 0x510, + MLX5_CMD_OP_SQD2SQD_QP = 0x511, + MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, + MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, + MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, + + MLX5_CMD_OP_CREATE_PSV = 0x600, + MLX5_CMD_OP_DESTROY_PSV = 0x601, + MLX5_CMD_OP_QUERY_PSV = 0x602, + MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, + MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, + + MLX5_CMD_OP_CREATE_SRQ = 0x700, + MLX5_CMD_OP_DESTROY_SRQ = 0x701, + MLX5_CMD_OP_QUERY_SRQ = 0x702, + MLX5_CMD_OP_ARM_RQ = 0x703, + MLX5_CMD_OP_RESIZE_SRQ = 0x704, + + MLX5_CMD_OP_ALLOC_PD = 0x800, + MLX5_CMD_OP_DEALLOC_PD = 0x801, + MLX5_CMD_OP_ALLOC_UAR = 0x802, + MLX5_CMD_OP_DEALLOC_UAR = 0x803, + + MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, + MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, + + + MLX5_CMD_OP_ALLOC_XRCD = 0x80e, + MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, + + MLX5_CMD_OP_ACCESS_REG = 0x805, + MLX5_CMD_OP_MAX = 0x810, +}; + +enum { + MLX5_REG_PCAP = 0x5001, + MLX5_REG_PMTU = 0x5003, + MLX5_REG_PTYS = 0x5004, + MLX5_REG_PAOS = 0x5006, + MLX5_REG_PMAOS = 0x5012, + MLX5_REG_PUDE = 0x5009, + MLX5_REG_PMPE = 0x5010, + MLX5_REG_PELC = 0x500e, + MLX5_REG_PMLP = 0, /* TBD */ + MLX5_REG_NODE_DESC = 0x6001, + MLX5_REG_HOST_ENDIANNESS = 0x7004, +}; + +enum dbg_rsc_type { + MLX5_DBG_RSC_QP, + MLX5_DBG_RSC_EQ, + MLX5_DBG_RSC_CQ, +}; + +struct mlx5_field_desc { + struct dentry *dent; + int i; +}; + +struct mlx5_rsc_debug { + struct mlx5_core_dev *dev; + void *object; + enum dbg_rsc_type type; + struct dentry *root; + struct mlx5_field_desc fields[0]; +}; + +enum mlx5_dev_event { + MLX5_DEV_EVENT_SYS_ERROR, + MLX5_DEV_EVENT_PORT_UP, + MLX5_DEV_EVENT_PORT_DOWN, + MLX5_DEV_EVENT_PORT_INITIALIZED, + MLX5_DEV_EVENT_LID_CHANGE, + MLX5_DEV_EVENT_PKEY_CHANGE, + MLX5_DEV_EVENT_GUID_CHANGE, + MLX5_DEV_EVENT_CLIENT_REREG, +}; + +struct mlx5_uuar_info { + struct mlx5_uar *uars; + int num_uars; + int num_low_latency_uuars; + unsigned long *bitmap; + unsigned int *count; + struct mlx5_bf *bfs; + + /* + * protect uuar allocation data structs + */ + struct mutex lock; +}; + +struct mlx5_bf { + void __iomem *reg; + void __iomem *regreg; + int buf_size; + struct mlx5_uar *uar; + unsigned long offset; + int need_lock; + /* protect blue flame buffer selection when needed + */ + spinlock_t lock; + + /* serialize 64 bit writes when done as two 32 bit accesses + */ + spinlock_t lock32; + int uuarn; +}; + +struct mlx5_cmd_first { + __be32 data[4]; +}; + +struct mlx5_cmd_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct mlx5_cmd_first first; + struct mlx5_cmd_mailbox *next; +}; + +struct mlx5_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct cache_ent { + /* protect block chain allocations + */ + spinlock_t lock; + struct list_head head; +}; + +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; + +}; + +struct mlx5_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct mlx5_cmd { + void *cmd_buf; + 
dma_addr_t dma; + u16 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + /* protect command queue allocations + */ + spinlock_t alloc_lock; + + /* protect token allocations + */ + spinlock_t token_lock; + u8 token; + unsigned long bitmask; + char wq_name[MLX5_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct semaphore sem; + struct semaphore pages_sem; + int mode; + struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; + struct pci_pool *pool; + struct mlx5_cmd_debug dbg; + struct cmd_msg_cache cache; + int checksum_disabled; + struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; +}; + +struct mlx5_port_caps { + int gid_table_len; + int pkey_table_len; +}; + +struct mlx5_caps { + u8 log_max_eq; + u8 log_max_cq; + u8 log_max_qp; + u8 log_max_mkey; + u8 log_max_pd; + u8 log_max_srq; + u32 max_cqes; + int max_wqes; + int max_sq_desc_sz; + int max_rq_desc_sz; + u64 flags; + u16 stat_rate_support; + int log_max_msg; + int num_ports; + int max_ra_res_qp; + int max_ra_req_qp; + int max_srq_wqes; + int bf_reg_size; + int bf_regs_per_page; + struct mlx5_port_caps port[MLX5_MAX_PORTS]; + u8 ext_port_cap[MLX5_MAX_PORTS]; + int max_vf; + u32 reserved_lkey; + u8 local_ca_ack_delay; + u8 log_max_mcg; + u16 max_qp_mcg; + int min_page_sz; +}; + +struct mlx5_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct mlx5_cmd_mailbox *next; +}; + +struct mlx5_buf_list { + void *buf; + dma_addr_t map; +}; + +struct mlx5_buf { + struct mlx5_buf_list direct; + struct mlx5_buf_list *page_list; + int nbufs; + int npages; + int page_shift; + int size; +}; + +struct mlx5_eq { + struct mlx5_core_dev *dev; + __be32 __iomem *doorbell; + u32 cons_index; + struct mlx5_buf buf; + int size; + u8 irqn; + u8 eqn; + int nent; + u64 mask; + char name[MLX5_MAX_EQ_NAME]; + struct list_head list; + int index; + struct mlx5_rsc_debug *dbg; +}; + + +struct mlx5_core_mr { + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; +}; + +struct mlx5_core_srq { + u32 srqn; + int max; + int max_gs; + int max_avail_gather; + int wqe_shift; + void (*event) (struct mlx5_core_srq *, enum mlx5_event); + + atomic_t refcount; + struct completion free; +}; + +struct mlx5_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head *comp_eq_head; + struct mlx5_eq pages_eq; + struct mlx5_eq async_eq; + struct mlx5_eq cmd_eq; + struct msix_entry *msix_arr; + int num_comp_vectors; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct mlx5_uar { + u32 index; + struct list_head bf_list; + unsigned free_bf_bmap; + void __iomem *wc_map; + void __iomem *map; +}; + + +struct mlx5_core_health { + struct health_buffer __iomem *health; + __be32 __iomem *health_counter; + struct timer_list timer; + struct list_head list; + u32 prev; + int miss_counter; +}; + +struct mlx5_cq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_srq_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct mlx5_priv { + char name[MLX5_MAX_NAME_LEN]; + struct mlx5_eq_table eq_table; + struct mlx5_uuar_info uuari; + MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); + + /* pages stuff */ + struct workqueue_struct *pg_wq; + struct rb_root page_root; + int fw_pages; + int reg_pages; + + struct mlx5_core_health health; + + struct mlx5_srq_table srq_table; + + /* start: qp staff */ + struct mlx5_qp_table 
qp_table; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + /* end: qp staff */ + + /* start: cq staff */ + struct mlx5_cq_table cq_table; + /* end: cq staff */ + + /* start: alloc staff */ + struct mutex pgdir_mutex; + struct list_head pgdir_list; + /* end: alloc staff */ + struct dentry *dbg_root; + + /* protect mkey key part */ + spinlock_t mkey_lock; + u8 mkey_key; +}; + +struct mlx5_core_dev { + struct pci_dev *pdev; + u8 rev_id; + char board_id[MLX5_BOARD_ID_LEN]; + struct mlx5_cmd cmd; + struct mlx5_caps caps; + phys_addr_t iseg_base; + struct mlx5_init_seg __iomem *iseg; + void (*event) (struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + void *data); + struct mlx5_priv priv; + struct mlx5_profile *profile; + atomic_t num_qps; +}; + +struct mlx5_db { + __be32 *db; + union { + struct mlx5_db_pgdir *pgdir; + struct mlx5_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; +}; + +enum { + MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES, +}; + +enum { + MLX5_COMP_EQ_SIZE = 1024, +}; + +struct mlx5_db_pgdir { + struct list_head list; + DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE); + __be32 *db_page; + dma_addr_t db_dma; +}; + +typedef void (*mlx5_cmd_cbk_t)(int status, void *context); + +struct mlx5_cmd_work_ent { + struct mlx5_cmd_msg *in; + struct mlx5_cmd_msg *out; + mlx5_cmd_cbk_t callback; + void *context; + int idx; + struct completion done; + struct mlx5_cmd *cmd; + struct work_struct work; + struct mlx5_cmd_layout *lay; + int ret; + int page_queue; + u8 status; + u8 token; + struct timespec ts1; + struct timespec ts2; +}; + +struct mlx5_pas { + u64 pa; + u8 log_sz; +}; + +static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset) +{ + if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +extern struct workqueue_struct *mlx5_core_wq; + +#define STRUCT_FIELD(header, field) \ + .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ + .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field + +struct ib_field { + size_t struct_offset_bytes; + size_t struct_size_bytes; + int offset_bits; + int size_bits; +}; + +static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +extern struct dentry *mlx5_debugfs_root; + +static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->fw_rev) & 0xffff; +} + +static inline u16 fw_rev_min(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->fw_rev) >> 16; +} + +static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; +} + +static inline u16 cmdif_rev(struct mlx5_core_dev *dev) +{ + return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; +} + +static inline void *mlx5_vzalloc(unsigned long size) +{ + void *rtn; + + rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!rtn) + rtn = vzalloc(size); + return rtn; +} + +static inline void mlx5_vfree(const void *addr) +{ + if (addr && is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); +void mlx5_dev_cleanup(struct mlx5_core_dev *dev); +int mlx5_cmd_init(struct mlx5_core_dev *dev); +void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +void mlx5_cmd_use_events(struct mlx5_core_dev *dev); +void mlx5_cmd_use_polling(struct 
mlx5_core_dev *dev); +int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); +int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, + int out_size); +int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); +int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); +int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); +void mlx5_health_cleanup(void); +void __init mlx5_health_init(void); +void mlx5_start_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct, + struct mlx5_buf *buf); +void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf); +struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, + gfp_t flags, int npages); +void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, + struct mlx5_cmd_mailbox *head); +int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_create_srq_mbox_in *in, int inlen); +int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); +int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + struct mlx5_query_srq_mbox_out *out); +int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, + u16 lwm, int is_srq); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_create_mkey_mbox_in *in, int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + struct mlx5_query_mkey_mbox_out *out, int outlen); +int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, + u32 *mkey); +int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); +int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); +int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, + u16 opmod, int port); +void mlx5_pagealloc_init(struct mlx5_core_dev *dev); +void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); +int mlx5_pagealloc_start(struct mlx5_core_dev *dev); +void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, + s16 npages); +int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); +void mlx5_register_debugfs(void); +void mlx5_unregister_debugfs(void); +int mlx5_eq_init(struct mlx5_core_dev *dev); +void mlx5_eq_cleanup(struct mlx5_core_dev *dev); +void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); +void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); +void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); +void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); +struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); +void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); +void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type); +int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, + int nent, u64 mask, const char *name, struct mlx5_uar *uar); +int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_start_eqs(struct mlx5_core_dev *dev); +int mlx5_stop_eqs(struct mlx5_core_dev *dev); +int 
mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); +int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); + +int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); +int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + struct mlx5_query_eq_mbox_out *out, int outlen); +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); +void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); + +typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); +int mlx5_register_health_report_handler(health_handler_t handler); +void mlx5_unregister_health_report_handler(void); +const char *mlx5_command_str(int command); +int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); + +static inline u32 mlx5_mkey_to_idx(u32 mkey) +{ + return mkey >> 8; +} + +static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << 8; +} + +enum { + MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, + MLX5_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, + MLX5_PROF_MASK_MR_CACHE = (u64)1 << 2, +}; + +enum { + MAX_MR_CACHE_ENTRIES = 16, +}; + +struct mlx5_profile { + u64 mask; + u32 log_max_qp; + int cmdif_csum; + struct { + int size; + int limit; + } mr_cache[MAX_MR_CACHE_ENTRIES]; +}; + +#endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h new file mode 100644 index 0000000..d9e3eac --- /dev/null +++ b/include/linux/mlx5/qp.h @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
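The command prototypes in driver.h above (mlx5_cmd_exec(), mlx5_cmd_status_to_err()) follow the inbox/outbox mailbox convention set by the structs and opcode enum earlier in these headers. A minimal sketch of that calling convention, illustrative only: my_enable_hca() is a made-up wrapper, and the big-endian opcode field of mlx5_inbox_hdr is assumed from the earlier part of device.h; the mailbox layouts, the opcode constant and the helper signatures are the ones declared above.

#include <linux/string.h>
#include <linux/mlx5/driver.h>

/* Hypothetical wrapper showing the inbox/outbox calling convention. */
static int my_enable_hca(struct mlx5_core_dev *dev)
{
        struct mlx5_enable_hca_mbox_in in;
        struct mlx5_enable_hca_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);

        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;             /* transport-level failure */

        if (out.hdr.status)             /* firmware rejected the command */
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}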
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef MLX5_QP_H +#define MLX5_QP_H + +#include <linux/mlx5/device.h> +#include <linux/mlx5/driver.h> + +#define MLX5_INVALID_LKEY 0x100 + +enum mlx5_qp_optpar { + MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MLX5_QP_OPTPAR_RRE = 1 << 1, + MLX5_QP_OPTPAR_RAE = 1 << 2, + MLX5_QP_OPTPAR_RWE = 1 << 3, + MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MLX5_QP_OPTPAR_Q_KEY = 1 << 5, + MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, + MLX5_QP_OPTPAR_SRA_MAX = 1 << 8, + MLX5_QP_OPTPAR_RRA_MAX = 1 << 9, + MLX5_QP_OPTPAR_PM_STATE = 1 << 10, + MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, + MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, + MLX5_QP_OPTPAR_SRQN = 1 << 18, + MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, + MLX5_QP_OPTPAR_DC_HS = 1 << 20, + MLX5_QP_OPTPAR_DC_KEY = 1 << 21, +}; + +enum mlx5_qp_state { + MLX5_QP_STATE_RST = 0, + MLX5_QP_STATE_INIT = 1, + MLX5_QP_STATE_RTR = 2, + MLX5_QP_STATE_RTS = 3, + MLX5_QP_STATE_SQER = 4, + MLX5_QP_STATE_SQD = 5, + MLX5_QP_STATE_ERR = 6, + MLX5_QP_STATE_SQ_DRAINING = 7, + MLX5_QP_STATE_SUSPENDED = 9, + MLX5_QP_NUM_STATE +}; + +enum { + MLX5_QP_ST_RC = 0x0, + MLX5_QP_ST_UC = 0x1, + MLX5_QP_ST_UD = 0x2, + MLX5_QP_ST_XRC = 0x3, + MLX5_QP_ST_MLX = 0x4, + MLX5_QP_ST_DCI = 0x5, + MLX5_QP_ST_DCT = 0x6, + MLX5_QP_ST_QP0 = 0x7, + MLX5_QP_ST_QP1 = 0x8, + MLX5_QP_ST_RAW_ETHERTYPE = 0x9, + MLX5_QP_ST_RAW_IPV6 = 0xa, + MLX5_QP_ST_SNIFFER = 0xb, + MLX5_QP_ST_SYNC_UMR = 0xe, + MLX5_QP_ST_PTP_1588 = 0xd, + MLX5_QP_ST_REG_UMR = 0xc, + MLX5_QP_ST_MAX +}; + +enum { + MLX5_QP_PM_MIGRATED = 0x3, + MLX5_QP_PM_ARMED = 0x0, + MLX5_QP_PM_REARM = 0x1 +}; + +enum { + MLX5_NON_ZERO_RQ = 0 << 24, + MLX5_SRQ_RQ = 1 << 24, + MLX5_CRQ_RQ = 2 << 24, + MLX5_ZERO_LEN_RQ = 3 << 24 +}; + +enum { + /* params1 */ + MLX5_QP_BIT_SRE = 1 << 15, + MLX5_QP_BIT_SWE = 1 << 14, + MLX5_QP_BIT_SAE = 1 << 13, + /* params2 */ + MLX5_QP_BIT_RRE = 1 << 15, + MLX5_QP_BIT_RWE = 1 << 14, + MLX5_QP_BIT_RAE = 1 << 13, + MLX5_QP_BIT_RIC = 1 << 4, +}; + +enum { + MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, + MLX5_WQE_CTRL_SOLICITED = 1 << 1, +}; + +enum { + MLX5_SEND_WQE_BB = 64, +}; + +enum { + MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27, + MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28, + MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29, + MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30, + MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31 +}; + +enum { + MLX5_FENCE_MODE_NONE = 0 << 5, + MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, + MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, + MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, +}; + +enum { + MLX5_QP_LAT_SENSITIVE = 1 << 28, + MLX5_QP_ENABLE_SIG = 1 << 31, +}; + +enum { + MLX5_RCV_DBR = 0, + MLX5_SND_DBR = 1, +}; + +struct mlx5_wqe_fmr_seg { + __be32 flags; + __be32 mem_key; + __be64 buf_list; + __be64 start_addr; + __be64 reg_len; + __be32 offset; + __be32 page_size; + u32 reserved[2]; +}; + +struct mlx5_wqe_ctrl_seg { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +}; + +struct mlx5_wqe_xrc_seg { + __be32 xrc_srqn; + u8 rsvd[12]; +}; + +struct mlx5_wqe_masked_atomic_seg { + __be64 swap_add; + __be64 compare; + __be64 swap_add_mask; + __be64 compare_mask; +}; + +struct mlx5_av { + union { + struct { + __be32 qkey; + __be32 
reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + __be16 rlid; + u8 reserved0[10]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +}; + +struct mlx5_wqe_datagram_seg { + struct mlx5_av av; +}; + +struct mlx5_wqe_raddr_seg { + __be64 raddr; + __be32 rkey; + u32 reserved; +}; + +struct mlx5_wqe_atomic_seg { + __be64 swap_add; + __be64 compare; +}; + +struct mlx5_wqe_data_seg { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct mlx5_wqe_umr_ctrl_seg { + u8 flags; + u8 rsvd0[3]; + __be16 klm_octowords; + __be16 bsf_octowords; + __be64 mkey_mask; + u8 rsvd1[32]; +}; + +struct mlx5_seg_set_psv { + __be32 psv_num; + __be16 syndrome; + __be16 status; + __be32 transient_sig; + __be32 ref_tag; +}; + +struct mlx5_seg_get_psv { + u8 rsvd[19]; + u8 num_psv; + __be32 l_key; + __be64 va; + __be32 psv_index[4]; +}; + +struct mlx5_seg_check_psv { + u8 rsvd0[2]; + __be16 err_coalescing_op; + u8 rsvd1[2]; + __be16 xport_err_op; + u8 rsvd2[2]; + __be16 xport_err_mask; + u8 rsvd3[7]; + u8 num_psv; + __be32 l_key; + __be64 va; + __be32 psv_index[4]; +}; + +struct mlx5_rwqe_sig { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +}; + +struct mlx5_wqe_signature_seg { + u8 rsvd0[4]; + u8 signature; + u8 rsvd1[11]; +}; + +struct mlx5_wqe_inline_seg { + __be32 byte_count; +}; + +struct mlx5_core_qp { + void (*event) (struct mlx5_core_qp *, int); + int qpn; + atomic_t refcount; + struct completion free; + struct mlx5_rsc_debug *dbg; + int pid; +}; + +struct mlx5_qp_path { + u8 fl; + u8 rsvd3; + u8 free_ar; + u8 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 rsvd1[4]; + u8 sl; + u8 port; + u8 rsvd2[6]; +}; + +struct mlx5_qp_context { + __be32 flags; + __be32 flags_pd; + u8 mtu_msgmax; + u8 rq_size_stride; + __be16 sq_crq_size; + __be32 qp_counter_set_usr_page; + __be32 wire_qpn; + __be32 log_pg_sz_remote_qpn; + struct mlx5_qp_path pri_path; + struct mlx5_qp_path alt_path; + __be32 params1; + u8 reserved2[4]; + __be32 next_send_psn; + __be32 cqn_send; + u8 reserved3[8]; + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 xrcd; + __be32 cqn_recv; + __be64 db_rec_addr; + __be32 qkey; + __be32 rq_type_srqn; + __be32 rmsn; + __be16 hw_sq_wqe_counter; + __be16 sw_sq_wqe_counter; + __be16 hw_rcyclic_byte_counter; + __be16 hw_rq_counter; + __be16 sw_rcyclic_byte_counter; + __be16 sw_rq_counter; + u8 rsvd0[5]; + u8 cgs; + u8 cs_req; + u8 cs_res; + __be64 dc_access_key; + u8 rsvd1[24]; +}; + +struct mlx5_create_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 input_qpn; + u8 rsvd0[4]; + __be32 opt_param_mask; + u8 rsvd1[4]; + struct mlx5_qp_context ctx; + u8 rsvd3[16]; + __be64 pas[0]; +}; + +struct mlx5_create_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 qpn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd0[4]; +}; + +struct mlx5_destroy_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + +struct mlx5_modify_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd1[4]; + __be32 optparam; + u8 rsvd0[4]; + struct mlx5_qp_context ctx; +}; + +struct mlx5_modify_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd0[8]; +}; + +struct mlx5_query_qp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct mlx5_query_qp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd1[8]; + __be32 
optparam; + u8 rsvd0[4]; + struct mlx5_qp_context ctx; + u8 rsvd2[16]; + __be64 pas[0]; +}; + +struct mlx5_conf_sqp_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[3]; + u8 type; +}; + +struct mlx5_conf_sqp_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_xrcd_mbox_in { + struct mlx5_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct mlx5_alloc_xrcd_mbox_out { + struct mlx5_outbox_hdr hdr; + __be32 xrcdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_xrcd_mbox_in { + struct mlx5_inbox_hdr hdr; + __be32 xrcdn; + u8 rsvd[4]; +}; + +struct mlx5_dealloc_xrcd_mbox_out { + struct mlx5_outbox_hdr hdr; + u8 rsvd[8]; +}; + +static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) +{ + return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); +} + +int mlx5_core_create_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp, + struct mlx5_create_qp_mbox_in *in, + int inlen); +int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, + enum mlx5_qp_state new_state, + struct mlx5_modify_qp_mbox_in *in, int sqd_event, + struct mlx5_core_qp *qp); +int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, + struct mlx5_core_qp *qp); +int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, + struct mlx5_query_qp_mbox_out *out, int outlen); + +int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); +int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); +void mlx5_init_qp_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); +int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); +void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); + +#endif /* MLX5_QP_H */ diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h new file mode 100644 index 0000000..e1a363a --- /dev/null +++ b/include/linux/mlx5/srq.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
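The QP interface above keeps live QPs in dev->priv.qp_table, a spinlock-protected radix tree, and mlx5_core_qp carries a refcount plus a completion for teardown. A plausible lookup/pin pattern, sketched only from those declarations (my_get_qp() and my_put_qp() are invented names):

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/qp.h>

/* Hypothetical helper: look up a QP by number and pin it while it is used. */
static struct mlx5_core_qp *my_get_qp(struct mlx5_core_dev *dev, u32 qpn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_qp *qp;

        spin_lock(&table->lock);
        qp = __mlx5_qp_lookup(dev, qpn);        /* radix_tree_lookup() */
        if (qp)
                atomic_inc(&qp->refcount);
        spin_unlock(&table->lock);

        return qp;
}

static void my_put_qp(struct mlx5_core_qp *qp)
{
        /* Last reference lets a waiter in the teardown path proceed. */
        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}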
+ */ + +#ifndef MLX5_SRQ_H +#define MLX5_SRQ_H + +#include <linux/mlx5/driver.h> + +void mlx5_init_srq_table(struct mlx5_core_dev *dev); +void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); + +#endif /* MLX5_SRQ_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b87681a..f022460 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -151,12 +151,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) #endif -#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ) -#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK -#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK)) -#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ) -#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) - /* * Special vmas that are non-mergable, non-mlock()able. * Note: mm/huge_memory.c VM_NO_THP depends on this definition. diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ace9a5f..fb425aa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -330,12 +330,9 @@ struct mm_struct { unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); - void (*unmap_area) (struct mm_struct *mm, unsigned long addr); #endif unsigned long mmap_base; /* base of mmap area */ unsigned long task_size; /* size of task vm space */ - unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ - unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; atomic_t mm_users; /* How many users with user space? */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index f31725b..842de3e 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -94,7 +94,11 @@ struct mmc_ext_csd { u8 raw_ext_csd_structure; /* 194 */ u8 raw_card_type; /* 196 */ u8 out_of_int_time; /* 198 */ - u8 raw_s_a_timeout; /* 217 */ + u8 raw_pwr_cl_52_195; /* 200 */ + u8 raw_pwr_cl_26_195; /* 201 */ + u8 raw_pwr_cl_52_360; /* 202 */ + u8 raw_pwr_cl_26_360; /* 203 */ + u8 raw_s_a_timeout; /* 217 */ u8 raw_hc_erase_gap_size; /* 221 */ u8 raw_erase_timeout_mult; /* 223 */ u8 raw_hc_erase_grp_size; /* 224 */ @@ -102,6 +106,10 @@ struct mmc_ext_csd { u8 raw_sec_erase_mult; /* 230 */ u8 raw_sec_feature_support;/* 231 */ u8 raw_trim_mult; /* 232 */ + u8 raw_pwr_cl_200_195; /* 236 */ + u8 raw_pwr_cl_200_360; /* 237 */ + u8 raw_pwr_cl_ddr_52_195; /* 238 */ + u8 raw_pwr_cl_ddr_52_360; /* 239 */ u8 raw_bkops_status; /* 246 */ u8 raw_sectors[4]; /* 212 - 4 bytes */ @@ -512,6 +520,7 @@ struct mmc_driver { void (*remove)(struct mmc_card *); int (*suspend)(struct mmc_card *); int (*resume)(struct mmc_card *); + void (*shutdown)(struct mmc_card *); }; extern int mmc_register_driver(struct mmc_driver *); diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 39613b9..443243b 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -96,6 +96,8 @@ struct mmc_command { */ unsigned int cmd_timeout_ms; /* in milliseconds */ + /* Set this flag only for blocking sanitize request */ + bool sanitize_busy; struct mmc_data *data; /* data segment associated with cmd */ struct mmc_request *mrq; /* associated request */ @@ -188,6 +190,9 @@ extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); extern int 
mmc_try_claim_host(struct mmc_host *host); +extern void mmc_get_card(struct mmc_card *card); +extern void mmc_put_card(struct mmc_card *card); + extern int mmc_flush_cache(struct mmc_card *); extern int mmc_detect_card_removed(struct mmc_host *host); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index e326ae2..3b0c33a 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -239,7 +239,7 @@ struct mmc_host { #define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */ #define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */ #define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */ - +#define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ #define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ @@ -264,7 +264,7 @@ struct mmc_host { #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_CACHE_CTRL (1 << 1) /* Allow cache control */ -#define MMC_CAP2_POWEROFF_NOTIFY (1 << 2) /* Notify poweroff supported */ +#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ #define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */ #define MMC_CAP2_NO_SLEEP_CMD (1 << 4) /* Don't allow sleep command */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ @@ -272,7 +272,6 @@ struct mmc_host { #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ MMC_CAP2_HS200_1_2V_SDR) #define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */ -#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */ #define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */ #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ @@ -281,6 +280,7 @@ struct mmc_host { #define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \ MMC_CAP2_PACKED_WR) #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ +#define MMC_CAP2_SANITIZE (1 << 15) /* Support Sanitize */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -369,7 +369,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *); int mmc_add_host(struct mmc_host *); void mmc_remove_host(struct mmc_host *); void mmc_free_host(struct mmc_host *); -void mmc_of_parse(struct mmc_host *host); +int mmc_of_parse(struct mmc_host *host); static inline void *mmc_priv(struct mmc_host *host) { @@ -425,10 +425,6 @@ static inline int mmc_regulator_get_supply(struct mmc_host *mmc) } #endif -int mmc_card_awake(struct mmc_host *host); -int mmc_card_sleep(struct mmc_host *host); -int mmc_card_can_sleep(struct mmc_host *host); - int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); /* Module parameter */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index b838ffc..e3c6a74 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -95,6 +95,9 @@ struct sdhci_host { /* The system physically doesn't support 1.8v, even if the host does */ #define SDHCI_QUIRK2_NO_1_8_V (1<<2) #define SDHCI_QUIRK2_PRESET_VALUE_BROKEN (1<<3) +#define SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON (1<<4) +/* Controller has a non-standard host control register */ +#define SDHCI_QUIRK2_BROKEN_HOST_CONTROL (1<<5) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ @@ -126,7 +129,7 @@ struct sdhci_host { #define 
SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */ #define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */ #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ -#define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */ +#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ unsigned int version; /* SDHCI spec. version */ @@ -139,6 +142,7 @@ struct sdhci_host { u8 pwr; /* Current voltage */ bool runtime_suspended; /* Host is runtime suspended */ + bool bus_on; /* Bus power prevents runtime suspend */ struct mmc_request *mrq; /* Current request */ struct mmc_command *cmd; /* Current command */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ae19af5..af4a3b7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -869,11 +869,6 @@ static inline int is_highmem_idx(enum zone_type idx) #endif } -static inline int is_normal_idx(enum zone_type idx) -{ - return (idx == ZONE_NORMAL); -} - /** * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references @@ -892,29 +887,6 @@ static inline int is_highmem(struct zone *zone) #endif } -static inline int is_normal(struct zone *zone) -{ - return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; -} - -static inline int is_dma32(struct zone *zone) -{ -#ifdef CONFIG_ZONE_DMA32 - return zone == zone->zone_pgdat->node_zones + ZONE_DMA32; -#else - return 0; -#endif -} - -static inline int is_dma(struct zone *zone) -{ -#ifdef CONFIG_ZONE_DMA - return zone == zone->zone_pgdat->node_zones + ZONE_DMA; -#else - return 0; -#endif -} - /* These two functions are used to setup the per zone pages min values */ struct ctl_table; int min_free_kbytes_sysctl_handler(struct ctl_table *, int, diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b62d4af..45e9214 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -361,7 +361,8 @@ struct ssb_device_id { __u16 vendor; __u16 coreid; __u8 revision; -}; + __u8 __pad; +} __attribute__((packed, aligned(2))); #define SSB_DEVICE(_vendor, _coreid, _revision) \ { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } #define SSB_DEVTABLE_END \ @@ -377,7 +378,7 @@ struct bcma_device_id { __u16 id; __u8 rev; __u8 class; -}; +} __attribute__((packed,aligned(2))); #define BCMA_CORE(_manuf, _id, _rev, _class) \ { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } #define BCMA_CORETABLE_END \ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 137b419..27d9da3 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -439,7 +439,7 @@ extern struct kernel_param_ops param_ops_string; extern int param_set_copystring(const char *val, const struct kernel_param *); extern int param_get_string(char *buffer, const struct kernel_param *kp); -/* for exporting parameters in /sys/parameters */ +/* for exporting parameters in /sys/module/.../parameters */ struct module; diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3793ed7..ccd4260 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -78,40 +78,6 @@ struct mutex_waiter { #endif }; -struct ww_class { - atomic_long_t stamp; - struct lock_class_key acquire_key; - struct lock_class_key mutex_key; - const char *acquire_name; - const char *mutex_name; -}; - -struct ww_acquire_ctx { - struct task_struct *task; - unsigned long 
stamp; - unsigned acquired; -#ifdef CONFIG_DEBUG_MUTEXES - unsigned done_acquire; - struct ww_class *ww_class; - struct ww_mutex *contending_lock; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH - unsigned deadlock_inject_interval; - unsigned deadlock_inject_countdown; -#endif -}; - -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES # include <linux/mutex-debug.h> #else @@ -136,11 +102,8 @@ static inline void mutex_destroy(struct mutex *lock) {} #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ , .dep_map = { .name = #lockname } -# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \ - , .ww_class = &ww_class #else # define __DEP_MAP_MUTEX_INITIALIZER(lockname) -# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) #endif #define __MUTEX_INITIALIZER(lockname) \ @@ -150,49 +113,13 @@ static inline void mutex_destroy(struct mutex *lock) {} __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } -#define __WW_CLASS_INITIALIZER(ww_class) \ - { .stamp = ATOMIC_LONG_INIT(0) \ - , .acquire_name = #ww_class "_acquire" \ - , .mutex_name = #ww_class "_mutex" } - -#define __WW_MUTEX_INITIALIZER(lockname, class) \ - { .base = { \__MUTEX_INITIALIZER(lockname) } \ - __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } - #define DEFINE_MUTEX(mutexname) \ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -#define DEFINE_WW_CLASS(classname) \ - struct ww_class classname = __WW_CLASS_INITIALIZER(classname) - -#define DEFINE_WW_MUTEX(mutexname, ww_class) \ - struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) - - extern void __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key); /** - * ww_mutex_init - initialize the w/w mutex - * @lock: the mutex to be initialized - * @ww_class: the w/w class the mutex should belong to - * - * Initialize the w/w mutex to unlocked state and associate it with the given - * class. - * - * It is not allowed to initialize an already locked mutex. - */ -static inline void ww_mutex_init(struct ww_mutex *lock, - struct ww_class *ww_class) -{ - __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); - lock->ctx = NULL; -#ifdef CONFIG_DEBUG_MUTEXES - lock->ww_class = ww_class; -#endif -} - -/** * mutex_is_locked - is the mutex locked * @lock: the mutex to be queried * @@ -246,291 +173,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); -/** - * ww_acquire_init - initialize a w/w acquire context - * @ctx: w/w acquire context to initialize - * @ww_class: w/w class of the context - * - * Initializes an context to acquire multiple mutexes of the given w/w class. - * - * Context-based w/w mutex acquiring can be done in any order whatsoever within - * a given lock class. Deadlocks will be detected and handled with the - * wait/wound logic. - * - * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can - * result in undetected deadlocks and is so forbidden. Mixing different contexts - * for the same w/w class when acquiring mutexes can also result in undetected - * deadlocks, and is hence also forbidden. Both types of abuse will be caught by - * enabling CONFIG_PROVE_LOCKING. 
- * - * Nesting of acquire contexts for _different_ w/w classes is possible, subject - * to the usual locking rules between different lock classes. - * - * An acquire context must be released with ww_acquire_fini by the same task - * before the memory is freed. It is recommended to allocate the context itself - * on the stack. - */ -static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, - struct ww_class *ww_class) -{ - ctx->task = current; - ctx->stamp = atomic_long_inc_return(&ww_class->stamp); - ctx->acquired = 0; -#ifdef CONFIG_DEBUG_MUTEXES - ctx->ww_class = ww_class; - ctx->done_acquire = 0; - ctx->contending_lock = NULL; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); - lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, - &ww_class->acquire_key, 0); - mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); -#endif -#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH - ctx->deadlock_inject_interval = 1; - ctx->deadlock_inject_countdown = ctx->stamp & 0xf; -#endif -} - -/** - * ww_acquire_done - marks the end of the acquire phase - * @ctx: the acquire context - * - * Marks the end of the acquire phase, any further w/w mutex lock calls using - * this context are forbidden. - * - * Calling this function is optional, it is just useful to document w/w mutex - * code and clearly designated the acquire phase from actually using the locked - * data structures. - */ -static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) -{ -#ifdef CONFIG_DEBUG_MUTEXES - lockdep_assert_held(ctx); - - DEBUG_LOCKS_WARN_ON(ctx->done_acquire); - ctx->done_acquire = 1; -#endif -} - -/** - * ww_acquire_fini - releases a w/w acquire context - * @ctx: the acquire context to free - * - * Releases a w/w acquire context. This must be called _after_ all acquired w/w - * mutexes have been released with ww_mutex_unlock. - */ -static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) -{ -#ifdef CONFIG_DEBUG_MUTEXES - mutex_release(&ctx->dep_map, 0, _THIS_IP_); - - DEBUG_LOCKS_WARN_ON(ctx->acquired); - if (!config_enabled(CONFIG_PROVE_LOCKING)) - /* - * lockdep will normally handle this, - * but fail without anyway - */ - ctx->done_acquire = 1; - - if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC)) - /* ensure ww_acquire_fini will still fail if called twice */ - ctx->acquired = ~0U; -#endif -} - -extern int __must_check __ww_mutex_lock(struct ww_mutex *lock, - struct ww_acquire_ctx *ctx); -extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock, - struct ww_acquire_ctx *ctx); - -/** - * ww_mutex_lock - acquire the w/w mutex - * @lock: the mutex to be acquired - * @ctx: w/w acquire context, or NULL to acquire only a single lock. - * - * Lock the w/w mutex exclusively for this task. - * - * Deadlocks within a given w/w class of locks are detected and handled with the - * wait/wound algorithm. If the lock isn't immediately avaiable this function - * will either sleep until it is (wait case). Or it selects the current context - * for backing off by returning -EDEADLK (wound case). Trying to acquire the - * same lock with the same context twice is also detected and signalled by - * returning -EALREADY. Returns 0 if the mutex was successfully acquired. - * - * In the wound case the caller must release all currently held w/w mutexes for - * the given context and then wait for this contending lock to be available by - * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this - * lock and proceed with trying to acquire further w/w mutexes (e.g. 
when - * scanning through lru lists trying to free resources). - * - * The mutex must later on be released by the same task that - * acquired it. The task may not exit without first unlocking the mutex. Also, - * kernel memory where the mutex resides must not be freed with the mutex still - * locked. The mutex must first be initialized (or statically defined) before it - * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be - * of the same w/w lock class as was used to initialize the acquire context. - * - * A mutex acquired with this function must be released with ww_mutex_unlock. - */ -static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -{ - if (ctx) - return __ww_mutex_lock(lock, ctx); - else { - mutex_lock(&lock->base); - return 0; - } -} - -/** - * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible - * @lock: the mutex to be acquired - * @ctx: w/w acquire context - * - * Lock the w/w mutex exclusively for this task. - * - * Deadlocks within a given w/w class of locks are detected and handled with the - * wait/wound algorithm. If the lock isn't immediately avaiable this function - * will either sleep until it is (wait case). Or it selects the current context - * for backing off by returning -EDEADLK (wound case). Trying to acquire the - * same lock with the same context twice is also detected and signalled by - * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a - * signal arrives while waiting for the lock then this function returns -EINTR. - * - * In the wound case the caller must release all currently held w/w mutexes for - * the given context and then wait for this contending lock to be available by - * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to - * not acquire this lock and proceed with trying to acquire further w/w mutexes - * (e.g. when scanning through lru lists trying to free resources). - * - * The mutex must later on be released by the same task that - * acquired it. The task may not exit without first unlocking the mutex. Also, - * kernel memory where the mutex resides must not be freed with the mutex still - * locked. The mutex must first be initialized (or statically defined) before it - * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be - * of the same w/w lock class as was used to initialize the acquire context. - * - * A mutex acquired with this function must be released with ww_mutex_unlock. - */ -static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, - struct ww_acquire_ctx *ctx) -{ - if (ctx) - return __ww_mutex_lock_interruptible(lock, ctx); - else - return mutex_lock_interruptible(&lock->base); -} - -/** - * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex - * @lock: the mutex to be acquired - * @ctx: w/w acquire context - * - * Acquires a w/w mutex with the given context after a wound case. This function - * will sleep until the lock becomes available. - * - * The caller must have released all w/w mutexes already acquired with the - * context and then call this function on the contended lock. - * - * Afterwards the caller may continue to (re)acquire the other w/w mutexes it - * needs with ww_mutex_lock. Note that the -EALREADY return code from - * ww_mutex_lock can be used to avoid locking this contended mutex twice. - * - * It is forbidden to call this function with any other w/w mutexes associated - * with the context held. 
It is forbidden to call this on anything else than the - * contending mutex. - * - * Note that the slowpath lock acquiring can also be done by calling - * ww_mutex_lock directly. This function here is simply to help w/w mutex - * locking code readability by clearly denoting the slowpath. - */ -static inline void -ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -{ - int ret; -#ifdef CONFIG_DEBUG_MUTEXES - DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); -#endif - ret = ww_mutex_lock(lock, ctx); - (void)ret; -} - -/** - * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, - * interruptible - * @lock: the mutex to be acquired - * @ctx: w/w acquire context - * - * Acquires a w/w mutex with the given context after a wound case. This function - * will sleep until the lock becomes available and returns 0 when the lock has - * been acquired. If a signal arrives while waiting for the lock then this - * function returns -EINTR. - * - * The caller must have released all w/w mutexes already acquired with the - * context and then call this function on the contended lock. - * - * Afterwards the caller may continue to (re)acquire the other w/w mutexes it - * needs with ww_mutex_lock. Note that the -EALREADY return code from - * ww_mutex_lock can be used to avoid locking this contended mutex twice. - * - * It is forbidden to call this function with any other w/w mutexes associated - * with the given context held. It is forbidden to call this on anything else - * than the contending mutex. - * - * Note that the slowpath lock acquiring can also be done by calling - * ww_mutex_lock_interruptible directly. This function here is simply to help - * w/w mutex locking code readability by clearly denoting the slowpath. - */ -static inline int __must_check -ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, - struct ww_acquire_ctx *ctx) -{ -#ifdef CONFIG_DEBUG_MUTEXES - DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); -#endif - return ww_mutex_lock_interruptible(lock, ctx); -} - -extern void ww_mutex_unlock(struct ww_mutex *lock); - -/** - * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context - * @lock: mutex to lock - * - * Trylocks a mutex without acquire context, so no deadlock detection is - * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. - */ -static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) -{ - return mutex_trylock(&lock->base); -} - -/*** - * ww_mutex_destroy - mark a w/w mutex unusable - * @lock: the mutex to be destroyed - * - * This function marks the mutex uninitialized, and any subsequent - * use of the mutex is forbidden. The mutex must not be locked when - * this function is called. - */ -static inline void ww_mutex_destroy(struct ww_mutex *lock) -{ - mutex_destroy(&lock->base); -} - -/** - * ww_mutex_is_locked - is the w/w mutex locked - * @lock: the mutex to be queried - * - * Returns 1 if the mutex is locked, 0 if unlocked. 
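The wait/wound kernel-doc removed from mutex.h in this hunk describes the acquire protocol in prose only; a minimal sketch of that documented flow for two locks of one class follows. The buffer type, the class name and my_lock_pair() are invented; the ww_* calls and the -EDEADLK back-off rule are exactly as documented above.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>     /* or <linux/mutex.h> in trees predating the split */

static DEFINE_WW_CLASS(my_buf_ww_class);

struct my_buf {
        struct ww_mutex lock;
        /* ... payload ... */
};

/* Lock two buffers in either order; back off and retry on -EDEADLK. */
static void my_lock_pair(struct my_buf *a, struct my_buf *b)
{
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &my_buf_ww_class);

        err = ww_mutex_lock(&a->lock, &ctx);    /* first lock in a ctx cannot deadlock */
        for (;;) {
                err = ww_mutex_lock(&b->lock, &ctx);
                if (err != -EDEADLK)
                        break;
                /* Wound case: drop everything held, sleep on the contended
                 * lock, then retry with that lock taken first.
                 */
                ww_mutex_unlock(&a->lock);
                ww_mutex_lock_slow(&b->lock, &ctx);
                swap(a, b);                     /* the slow-path lock is now "a" */
        }
        ww_acquire_done(&ctx);                  /* optional: ends the acquire phase */

        /* ... use both buffers ... */

        ww_mutex_unlock(&a->lock);
        ww_mutex_unlock(&b->lock);
        ww_acquire_fini(&ctx);
}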
- */ -static inline bool ww_mutex_is_locked(struct ww_mutex *lock) -{ - return mutex_is_locked(&lock->base); -} - extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h index 141d395..6e8215b 100644 --- a/include/linux/mv643xx_eth.h +++ b/include/linux/mv643xx_eth.h @@ -30,6 +30,7 @@ struct mv643xx_eth_shared_platform_data { #define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x)) #define MV643XX_ETH_PHY_NONE 0xff +struct device_node; struct mv643xx_eth_platform_data { /* * Pointer back to our parent instance, and our port number. @@ -41,6 +42,7 @@ struct mv643xx_eth_platform_data { * Whether a PHY is present, and if yes, at which address. */ int phy_addr; + struct device_node *phy_node; /* * Use this MAC address if it is valid, overriding the diff --git a/include/linux/net.h b/include/linux/net.h index 99c9f0c..4f27575 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -79,9 +79,9 @@ enum sock_type { #endif /* ARCH_HAS_SOCKET_TYPES */ enum sock_shutdown_cmd { - SHUT_RD = 0, - SHUT_WR = 1, - SHUT_RDWR = 2, + SHUT_RD, + SHUT_WR, + SHUT_RDWR, }; struct socket_wq { diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 09906b7..a2a89a5 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -43,8 +43,9 @@ enum { NETIF_F_FSO_BIT, /* ... FCoE segmentation */ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ + NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_UDP_TUNNEL_BIT, + NETIF_F_GSO_MPLS_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ @@ -107,6 +108,7 @@ enum { #define NETIF_F_RXALL __NETIF_F(RXALL) #define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE) #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) +#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 96e4c21..9a41568 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -324,12 +324,15 @@ struct napi_struct { struct sk_buff *gro_list; struct sk_buff *skb; struct list_head dev_list; + struct hlist_node napi_hash_node; + unsigned int napi_id; }; enum { NAPI_STATE_SCHED, /* Poll is scheduled */ NAPI_STATE_DISABLE, /* Disable pending */ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_HASHED, /* In NAPI hash */ }; enum gro_result { @@ -446,6 +449,32 @@ extern void __napi_complete(struct napi_struct *n); extern void napi_complete(struct napi_struct *n); /** + * napi_by_id - lookup a NAPI by napi_id + * @napi_id: hashed napi_id + * + * lookup @napi_id in napi_hash table + * must be called under rcu_read_lock() + */ +extern struct napi_struct *napi_by_id(unsigned int napi_id); + +/** + * napi_hash_add - add a NAPI to global hashtable + * @napi: napi context + * + * generate a new napi_id and store a @napi under it in napi_hash + */ +extern void napi_hash_add(struct napi_struct *napi); + +/** + * napi_hash_del - remove a NAPI from global table + * @napi: napi context + * + * Warning: caller must observe rcu grace period + * before freeing memory containing @napi + */ +extern void napi_hash_del(struct 
napi_struct *napi); + +/** * napi_disable - prevent NAPI from scheduling * @n: napi context * @@ -800,6 +829,7 @@ struct netdev_fcoe_hbainfo { * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); + * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); * int (*ndo_set_vf_port)(struct net_device *dev, int vf, * struct nlattr *port[]); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); @@ -943,6 +973,9 @@ struct net_device_ops { gfp_t gfp); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif +#ifdef CONFIG_NET_RX_BUSY_POLL + int (*ndo_busy_poll)(struct napi_struct *dev); +#endif int (*ndo_set_vf_mac)(struct net_device *dev, int queue, u8 *mac); int (*ndo_set_vf_vlan)(struct net_device *dev, @@ -954,6 +987,8 @@ struct net_device_ops { int (*ndo_get_vf_config)(struct net_device *dev, int vf, struct ifla_vf_info *ivf); + int (*ndo_set_vf_link_state)(struct net_device *dev, + int vf, int link_state); int (*ndo_set_vf_port)(struct net_device *dev, int vf, struct nlattr *port[]); @@ -1088,6 +1123,8 @@ struct net_device { * need to set them appropriately. */ netdev_features_t hw_enc_features; + /* mask of fetures inheritable by MPLS */ + netdev_features_t mpls_features; /* Interface index. Unique device identifier */ int ifindex; @@ -1140,8 +1177,10 @@ struct net_device { unsigned char addr_assign_type; /* hw address assignment type */ unsigned char addr_len; /* hardware address length */ unsigned char neigh_priv_len; - unsigned short dev_id; /* for shared network cards */ - + unsigned short dev_id; /* Used to differentiate devices + * that share the same link + * layer address + */ spinlock_t addr_list_lock; struct netdev_hw_addr_list uc; /* Unicast mac addresses */ struct netdev_hw_addr_list mc; /* Multicast mac addresses */ @@ -1593,9 +1632,34 @@ struct packet_offload { #define NETDEV_RELEASE 0x0012 #define NETDEV_NOTIFY_PEERS 0x0013 #define NETDEV_JOIN 0x0014 +#define NETDEV_CHANGEUPPER 0x0015 extern int register_netdevice_notifier(struct notifier_block *nb); extern int unregister_netdevice_notifier(struct notifier_block *nb); + +struct netdev_notifier_info { + struct net_device *dev; +}; + +struct netdev_notifier_change_info { + struct netdev_notifier_info info; /* must be first */ + unsigned int flags_changed; +}; + +static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, + struct net_device *dev) +{ + info->dev = dev; +} + +static inline struct net_device * +netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) +{ + return info->dev; +} + +extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, + struct netdev_notifier_info *info); extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); @@ -1779,6 +1843,19 @@ static inline int unregister_gifconf(unsigned int family) return register_gifconf(family, NULL); } +#ifdef CONFIG_NET_FLOW_LIMIT +#define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ +struct sd_flow_limit { + u64 count; + unsigned int num_buckets; + unsigned int history_head; + u16 history[FLOW_LIMIT_HISTORY]; + u8 buckets[]; +}; + +extern int netdev_flow_limit_table_len; +#endif /* CONFIG_NET_FLOW_LIMIT */ + /* * Incoming packets are placed on per-cpu queues */ @@ -1808,6 +1885,10 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + +#ifdef 
CONFIG_NET_FLOW_LIMIT + struct sd_flow_limit __rcu *flow_limit; +#endif }; static inline void input_queue_head_incr(struct softnet_data *sd) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 0060fde..de70f7b 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -35,7 +35,7 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1, result->all[3] = a1->all[3] & mask->all[3]; } -extern void netfilter_init(void); +extern int netfilter_init(void); /* Largest hook number + 1 */ #define NF_MAX_HOOKS 8 diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 6358da5..7a6c396 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -46,6 +46,7 @@ struct netlink_kernel_cfg { void (*input)(struct sk_buff *skb); struct mutex *cb_mutex; void (*bind)(int group); + bool (*compare)(struct net *net, struct sock *sk); }; extern struct sock *__netlink_kernel_create(struct net *net, int unit, @@ -84,6 +85,22 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, void netlink_detachskb(struct sock *sk, struct sk_buff *skb); int netlink_sendskb(struct sock *sk, struct sk_buff *skb); +static inline struct sk_buff * +netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask) +{ + struct sk_buff *nskb; + + nskb = skb_clone(skb, gfp_mask); + if (!nskb) + return NULL; + + /* This is a large skb, set destructor callback to release head */ + if (is_vmalloc_addr(skb->head)) + nskb->destructor = skb->destructor; + + return nskb; +} + /* * skb should fit one page. This choice is good for headerless malloc. * But we should limit to 8K so that userspace does not have to @@ -144,4 +161,14 @@ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, return __netlink_dump_start(ssk, skb, nlh, control); } +struct netlink_tap { + struct net_device *dev; + struct module *module; + struct list_head list; +}; + +extern int netlink_add_tap(struct netlink_tap *nt); +extern int __netlink_remove_tap(struct netlink_tap *nt); +extern int netlink_remove_tap(struct netlink_tap *nt); + #endif /* __LINUX_NETLINK_H */ diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index fa2cb76..f3c7c24 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -53,10 +53,10 @@ struct netpoll_info { }; #ifdef CONFIG_NETPOLL -extern int netpoll_rx_disable(struct net_device *dev); +extern void netpoll_rx_disable(struct net_device *dev); extern void netpoll_rx_enable(struct net_device *dev); #else -static inline int netpoll_rx_disable(struct net_device *dev) { return 0; } +static inline void netpoll_rx_disable(struct net_device *dev) { return; } static inline void netpoll_rx_enable(struct net_device *dev) { return; } #endif diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 7b8fc73..e36dee5 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -32,6 +32,15 @@ struct nfs4_acl { struct nfs4_ace aces[0]; }; +#define NFS4_MAXLABELLEN 2048 + +struct nfs4_label { + uint32_t lfs; + uint32_t pi; + u32 len; + char *label; +}; + typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier; struct nfs_stateid4 { @@ -219,6 +228,14 @@ enum nfsstat4 { NFS4ERR_REJECT_DELEG = 10085, /* on callback */ NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */ NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */ + + /* nfs42 */ + NFS4ERR_PARTNER_NOTSUPP = 10088, + NFS4ERR_PARTNER_NO_AUTH = 10089, + NFS4ERR_METADATA_NOTSUPP = 10090, + NFS4ERR_OFFLOAD_DENIED = 10091, + NFS4ERR_WRONG_LFS = 10092, + NFS4ERR_BADLABEL = 
10093, }; static inline bool seqid_mutating_err(u32 err) @@ -378,6 +395,7 @@ enum lock_type4 { #define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) +#define FATTR4_WORD2_SECURITY_LABEL (1UL << 17) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) @@ -390,11 +408,15 @@ enum lock_type4 { #define NFS4_VERSION 4 #define NFS4_MINOR_VERSION 0 +#if defined(CONFIG_NFS_V4_2) +#define NFS4_MAX_MINOR_VERSION 2 +#else #if defined(CONFIG_NFS_V4_1) #define NFS4_MAX_MINOR_VERSION 1 #else #define NFS4_MAX_MINOR_VERSION 0 #endif /* CONFIG_NFS_V4_1 */ +#endif /* CONFIG_NFS_V4_2 */ #define NFS4_DEBUG 1 diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index fc01d5c..7125cef 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -207,6 +207,7 @@ struct nfs_inode { #define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */ #define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */ #define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */ +#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */ /* * Bit offsets in flags field @@ -336,7 +337,7 @@ extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); extern void nfs_zap_caches(struct inode *); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, - struct nfs_fattr *); + struct nfs_fattr *, struct nfs4_label *); extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); @@ -347,15 +348,19 @@ extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_release(struct inode *, struct file *); extern int nfs_attribute_timeout(struct inode *inode); +extern int nfs_attribute_cache_expired(struct inode *inode); extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); +extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, + struct nfs4_label *label); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode); +extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); @@ -468,7 +473,8 @@ extern const struct file_operations nfs_dir_operations; extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); -extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); +extern int nfs_instantiate(struct dentry 
*dentry, struct nfs_fh *fh, + struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); @@ -497,6 +503,24 @@ extern int nfs_mountpoint_expiry_timeout; extern void nfs_release_automount_timer(void); /* + * linux/fs/nfs/nfs4proc.c + */ +#ifdef CONFIG_NFS_V4_SECURITY_LABEL +extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); +static inline void nfs4_label_free(struct nfs4_label *label) +{ + if (label) { + kfree(label->label); + kfree(label); + } + return; +} +#else +static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; } +static inline void nfs4_label_free(void *label) {} +#endif + +/* * linux/fs/nfs/unlink.c */ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 3b7fa2a..d221243 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -146,7 +146,12 @@ struct nfs_server { u32 attr_bitmask[3];/* V4 bitmask representing the set of attributes supported on this filesystem */ - u32 cache_consistency_bitmask[2]; + u32 attr_bitmask_nl[3]; + /* V4 bitmask representing the + set of attributes supported + on this filesystem excluding + the label support bit. */ + u32 cache_consistency_bitmask[3]; /* V4 bitmask representing the subset of change attribute, size, ctime and mtime attributes supported by @@ -200,5 +205,6 @@ struct nfs_server { #define NFS_CAP_UIDGID_NOMAP (1U << 15) #define NFS_CAP_STATEID_NFSV41 (1U << 16) #define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) +#define NFS_CAP_SECURITY_LABEL (1U << 18) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 104b62f..8651574 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -101,6 +101,7 @@ struct nfs_fattr { #define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22) #define NFS_ATTR_FATTR_OWNER_NAME (1U << 23) #define NFS_ATTR_FATTR_GROUP_NAME (1U << 24) +#define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25) #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ | NFS_ATTR_FATTR_MODE \ @@ -120,7 +121,8 @@ struct nfs_fattr { #define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ | NFS_ATTR_FATTR_SPACE_USED) #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ - | NFS_ATTR_FATTR_SPACE_USED) + | NFS_ATTR_FATTR_SPACE_USED \ + | NFS_ATTR_FATTR_V4_SECURITY_LABEL) /* * Info on the file system @@ -246,6 +248,7 @@ struct nfs4_layoutget_res { struct nfs4_layoutget { struct nfs4_layoutget_args args; struct nfs4_layoutget_res res; + struct rpc_cred *cred; gfp_t gfp_flags; }; @@ -347,6 +350,7 @@ struct nfs_openargs { const u32 * open_bitmap; __u32 claim; enum createmode4 createmode; + const struct nfs4_label *label; }; struct nfs_openres { @@ -356,6 +360,7 @@ struct nfs_openres { struct nfs4_change_info cinfo; __u32 rflags; struct nfs_fattr * f_attr; + struct nfs4_label *f_label; struct nfs_seqid * seqid; const struct nfs_server *server; fmode_t delegation_type; @@ -598,6 +603,7 @@ struct nfs_entry { int eof; struct nfs_fh * fh; struct nfs_fattr * fattr; + struct nfs4_label *label; unsigned char d_type; struct nfs_server * server; }; @@ -630,6 +636,7 @@ struct nfs_setattrargs { struct iattr * iap; const struct nfs_server * server; /* Needed for name mapping */ const u32 * bitmask; + const struct nfs4_label *label; }; struct nfs_setaclargs { @@ -665,6 +672,7 @@ struct nfs_getaclres { struct nfs_setattrres { struct nfs4_sequence_res seq_res; 
struct nfs_fattr * fattr; + struct nfs4_label *label; const struct nfs_server * server; }; @@ -862,6 +870,7 @@ struct nfs4_create_arg { const struct iattr * attrs; const struct nfs_fh * dir_fh; const u32 * bitmask; + const struct nfs4_label *label; }; struct nfs4_create_res { @@ -869,6 +878,7 @@ struct nfs4_create_res { const struct nfs_server * server; struct nfs_fh * fh; struct nfs_fattr * fattr; + struct nfs4_label *label; struct nfs4_change_info dir_cinfo; }; @@ -893,6 +903,7 @@ struct nfs4_getattr_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; + struct nfs4_label *label; }; struct nfs4_link_arg { @@ -907,6 +918,7 @@ struct nfs4_link_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; + struct nfs4_label *label; struct nfs4_change_info cinfo; struct nfs_fattr * dir_attr; }; @@ -924,6 +936,7 @@ struct nfs4_lookup_res { const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs_fh * fh; + struct nfs4_label *label; }; struct nfs4_lookup_root_arg { @@ -1366,11 +1379,12 @@ struct nfs_rpc_ops { struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *, struct nfs_subversion *); int (*getattr) (struct nfs_server *, struct nfs_fh *, - struct nfs_fattr *); + struct nfs_fattr *, struct nfs4_label *); int (*setattr) (struct dentry *, struct nfs_fattr *, struct iattr *); int (*lookup) (struct inode *, struct qstr *, - struct nfs_fh *, struct nfs_fattr *); + struct nfs_fh *, struct nfs_fattr *, + struct nfs4_label *); int (*access) (struct inode *, struct nfs_access_entry *); int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index db50840..6a45fb5 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -46,7 +46,7 @@ static inline bool trigger_all_cpu_backtrace(void) #ifdef CONFIG_LOCKUP_DETECTOR int hw_nmi_is_cpu_stuck(struct pt_regs *); u64 hw_nmi_get_sample_period(int watchdog_thresh); -extern int watchdog_enabled; +extern int watchdog_user_enabled; extern int watchdog_thresh; struct ctl_table; extern int proc_dowatchdog(struct ctl_table *, int , diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 901b743..9d27475 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -4,12 +4,12 @@ #include <linux/platform_device.h> #include <linux/of_platform.h> /* temporary until merge */ -#ifdef CONFIG_OF_DEVICE #include <linux/of.h> #include <linux/mod_devicetable.h> struct device; +#ifdef CONFIG_OF extern const struct of_device_id *of_match_device( const struct of_device_id *matches, const struct device *dev); extern void of_device_make_bus_id(struct device *dev); @@ -43,7 +43,7 @@ static inline void of_device_node_put(struct device *dev) of_node_put(dev->of_node); } -#else /* CONFIG_OF_DEVICE */ +#else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, struct device_driver *drv) @@ -67,6 +67,6 @@ static inline const struct of_device_id *of_match_device( { return NULL; } -#endif /* CONFIG_OF_DEVICE */ +#endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 364dda7..ae36298 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -21,7 +21,6 @@ struct device_node; struct of_dma { struct list_head of_dma_controllers; struct device_node *of_node; - int of_dma_nbcells; struct dma_chan *(*of_dma_xlate) (struct of_phandle_args *, struct of_dma *); void 
*of_dma_data; diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 2a93b64..05cb4a9 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -13,8 +13,6 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> - -#ifdef CONFIG_OF_DEVICE #include <linux/pm.h> #include <linux/of_device.h> #include <linux/platform_device.h> @@ -53,27 +51,6 @@ struct of_dev_auxdata { { .compatible = _compat, .phys_addr = _phys, .name = _name, \ .platform_data = _pdata } -/** - * of_platform_driver - Legacy of-aware driver for platform devices. - * - * An of_platform_driver driver is attached to a basic platform_device on - * the ibm ebus (ibmebus_bus_type). - */ -struct of_platform_driver -{ - int (*probe)(struct platform_device* dev, - const struct of_device_id *match); - int (*remove)(struct platform_device* dev); - - int (*suspend)(struct platform_device* dev, pm_message_t state); - int (*resume)(struct platform_device* dev); - int (*shutdown)(struct platform_device* dev); - - struct device_driver driver; -}; -#define to_of_platform_driver(drv) \ - container_of(drv,struct of_platform_driver, driver) - extern const struct of_device_id of_default_bus_match_table[]; /* Platform drivers register/unregister */ @@ -82,7 +59,6 @@ extern struct platform_device *of_device_alloc(struct device_node *np, struct device *parent); extern struct platform_device *of_find_device_by_node(struct device_node *np); -#ifdef CONFIG_OF_ADDRESS /* device reg helpers depend on OF_ADDRESS */ /* Platform devices and busses creation */ extern struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, @@ -91,17 +67,12 @@ extern struct platform_device *of_platform_device_create(struct device_node *np, extern int of_platform_bus_probe(struct device_node *root, const struct of_device_id *matches, struct device *parent); +#ifdef CONFIG_OF_ADDRESS extern int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, struct device *parent); -#endif /* CONFIG_OF_ADDRESS */ - -#endif /* CONFIG_OF_DEVICE */ - -#if !defined(CONFIG_OF_ADDRESS) -struct of_dev_auxdata; -struct device_node; +#else static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, @@ -109,6 +80,6 @@ static inline int of_platform_populate(struct device_node *root, { return -ENODEV; } -#endif /* !CONFIG_OF_ADDRESS */ +#endif #endif /* _LINUX_OF_PLATFORM_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 8873f82..c43f6ea 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -826,7 +826,7 @@ static inline void perf_restore_debug_store(void) { } */ #define perf_cpu_notifier(fn) \ do { \ - static struct notifier_block fn##_nb __cpuinitdata = \ + static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ unsigned long cpu = smp_processor_id(); \ unsigned long flags; \ diff --git a/include/linux/phy.h b/include/linux/phy.h index 9e11039..64ab823 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -49,6 +49,7 @@ #define PHY_HAS_INTERRUPT 0x00000001 #define PHY_HAS_MAGICANEG 0x00000002 +#define PHY_IS_INTERNAL 0x00000004 /* Interface Mode definitions */ typedef enum { @@ -57,6 +58,7 @@ typedef enum { PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_SGMII, PHY_INTERFACE_MODE_TBI, + PHY_INTERFACE_MODE_REVMII, PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, 
PHY_INTERFACE_MODE_RGMII_ID, @@ -261,6 +263,7 @@ struct phy_c45_device_ids { * phy_id: UID for this device found during discovery * c45_ids: 802.3-c45 Device Identifers if is_c45. * is_c45: Set to true if this phy uses clause 45 addressing. + * is_internal: Set to true if this phy is internal to a MAC. * state: state of the PHY for management purposes * dev_flags: Device-specific flags used by the PHY driver. * addr: Bus address of PHY @@ -298,6 +301,7 @@ struct phy_device { struct phy_c45_device_ids c45_ids; bool is_c45; + bool is_internal; enum phy_state state; @@ -508,6 +512,27 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) return mdiobus_write(phydev->bus, phydev->addr, regnum, val); } +/** + * phy_interrupt_is_valid - Convenience function for testing a given PHY irq + * @phydev: the phy_device struct + * + * NOTE: must be kept in sync with addition/removal of PHY_POLL and + * PHY_IGNORE_INTERRUPT + */ +static inline bool phy_interrupt_is_valid(struct phy_device *phydev) +{ + return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT; +} + +/** + * phy_is_internal - Convenience function for testing if a PHY is internal + * @phydev: the phy_device struct + */ +static inline bool phy_is_internal(struct phy_device *phydev) +{ + return phydev->is_internal; +} + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); @@ -545,6 +570,8 @@ void phy_drivers_unregister(struct phy_driver *drv, int n); int phy_driver_register(struct phy_driver *new_driver); int phy_drivers_register(struct phy_driver *new_driver, int n); void phy_state_machine(struct work_struct *work); +void phy_change(struct work_struct *work); +void phy_mac_interrupt(struct phy_device *phydev, int new_link); void phy_start_machine(struct phy_device *phydev, void (*handler)(struct net_device *)); void phy_stop_machine(struct phy_device *phydev); diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h index 1ade657..b717499 100644 --- a/include/linux/platform_data/brcmfmac-sdio.h +++ b/include/linux/platform_data/brcmfmac-sdio.h @@ -90,6 +90,10 @@ void __init brcmfmac_init_pdata(void) * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are * used for registering the irq using request_irq function. * + * broken_sg_support: flag for broken sg list support of SDIO host controller. + * Set this to true if the SDIO host controller has higher align requirement + * than 32 bytes for each scatterlist item. + * * power_on: This function is called by the brcmfmac when the module gets * loaded. This can be particularly useful for low power devices. The platform * spcific routine may for example decide to power up the complete device. @@ -116,6 +120,7 @@ struct brcmfmac_sdio_platform_data { bool oob_irq_supported; unsigned int oob_irq_nr; unsigned long oob_irq_flags; + bool broken_sg_support; void (*power_on)(void); void (*power_off)(void); void (*reset)(void); diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h new file mode 100644 index 0000000..6eba54a --- /dev/null +++ b/include/linux/platform_data/cyttsp4.h @@ -0,0 +1,76 @@ +/* + * Header file for: + * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. + * For use with Cypress Txx3xx parts. 
+ * Supported parts include: + * CY8CTST341 + * CY8CTMA340 + * + * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. + * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, and only version 2, as published by the + * Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) + * + */ +#ifndef _CYTTSP4_H_ +#define _CYTTSP4_H_ + +#define CYTTSP4_MT_NAME "cyttsp4_mt" +#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter" +#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter" + +#define CY_TOUCH_SETTINGS_MAX 32 + +struct touch_framework { + const uint16_t *abs; + uint8_t size; + uint8_t enable_vkeys; +} __packed; + +struct cyttsp4_mt_platform_data { + struct touch_framework *frmwrk; + unsigned short flags; + char const *inp_dev_name; +}; + +struct touch_settings { + const uint8_t *data; + uint32_t size; + uint8_t tag; +} __packed; + +struct cyttsp4_core_platform_data { + int irq_gpio; + int rst_gpio; + int level_irq_udelay; + int (*xres)(struct cyttsp4_core_platform_data *pdata, + struct device *dev); + int (*init)(struct cyttsp4_core_platform_data *pdata, + int on, struct device *dev); + int (*power)(struct cyttsp4_core_platform_data *pdata, + int on, struct device *dev, atomic_t *ignore_irq); + int (*irq_stat)(struct cyttsp4_core_platform_data *pdata, + struct device *dev); + struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX]; +}; + +struct cyttsp4_platform_data { + struct cyttsp4_core_platform_data *core_pdata; + struct cyttsp4_mt_platform_data *mt_pdata; +}; + +#endif /* _CYTTSP4_H_ */ diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h index cab0997..e95f19c 100644 --- a/include/linux/platform_data/dma-atmel.h +++ b/include/linux/platform_data/dma-atmel.h @@ -35,16 +35,20 @@ struct at_dma_slave { /* Platform-configurable bits in CFG */ +#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ + #define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */ #define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */ #define ATC_SRC_REP (0x1 << 8) /* Source Replay Mod */ #define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mod */ #define ATC_SRC_H2SEL_SW (0x0 << 9) #define ATC_SRC_H2SEL_HW (0x1 << 9) +#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */ #define ATC_DST_REP (0x1 << 12) /* Destination Replay Mod */ #define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mod */ #define ATC_DST_H2SEL_SW (0x0 << 13) #define ATC_DST_H2SEL_HW (0x1 << 13) +#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */ #define ATC_SOD (0x1 << 16) /* Stop On Done */ #define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */ #define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */ diff --git 
a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index f6d30cc..beac6b8 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -60,10 +60,8 @@ static inline int imx_dma_is_ipu(struct dma_chan *chan) static inline int imx_dma_is_general_purpose(struct dma_chan *chan) { - return strstr(dev_name(chan->device->dev), "sdma") || - !strcmp(dev_name(chan->device->dev), "imx1-dma") || - !strcmp(dev_name(chan->device->dev), "imx21-dma") || - !strcmp(dev_name(chan->device->dev), "imx27-dma"); + return !strcmp(chan->device->dev->driver->name, "imx-sdma") || + !strcmp(chan->device->dev->driver->name, "imx-dma"); } #endif diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h index 5ce8d5e6..2462556 100644 --- a/include/linux/platform_data/keypad-pxa27x.h +++ b/include/linux/platform_data/keypad-pxa27x.h @@ -36,10 +36,9 @@ struct pxa27x_keypad_platform_data { /* code map for the matrix keys */ + const struct matrix_keymap_data *matrix_keymap_data; unsigned int matrix_key_rows; unsigned int matrix_key_cols; - unsigned int *matrix_key_map; - int matrix_key_map_size; /* direct keys */ int direct_key_num; diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index b4a0521..d44912d 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -40,5 +40,6 @@ struct esdhc_platform_data { enum wp_types wp_type; enum cd_types cd_type; int max_bus_width; + unsigned int f_max; }; #endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h index 9eb515b..1706b35 100644 --- a/include/linux/platform_data/mmc-pxamci.h +++ b/include/linux/platform_data/mmc-pxamci.h @@ -12,7 +12,7 @@ struct pxamci_platform_data { unsigned long detect_delay_ms; /* delay in millisecond before detecting cards after interrupt */ int (*init)(struct device *, irq_handler_t , void *); int (*get_ro)(struct device *); - void (*setpower)(struct device *, unsigned int); + int (*setpower)(struct device *, unsigned int); void (*exit)(struct device *, void *); int gpio_card_detect; /* gpio detecting card insertion */ int gpio_card_ro; /* gpio detecting read only toggle */ diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h new file mode 100644 index 0000000..c6fbc3c --- /dev/null +++ b/include/linux/platform_data/net-cw1200.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) ST-Ericsson SA 2011 + * + * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com> + * License terms: GNU General Public License (GPL) version 2 + */ + +#ifndef CW1200_PLAT_H_INCLUDED +#define CW1200_PLAT_H_INCLUDED + +struct cw1200_platform_data_spi { + u8 spi_bits_per_word; /* REQUIRED */ + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are optional */ + bool have_5ghz; + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + +struct cw1200_platform_data_sdio { + u16 ref_clk; /* REQUIRED (in KHz) */ + + /* All others are 
optional */ + bool have_5ghz; + bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */ + int reset; /* GPIO to RSTn signal (0 disables) */ + int powerup; /* GPIO to POWERUP signal (0 disables) */ + int irq; /* IRQ line or 0 to use SDIO IRQ */ + int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control 3v3 / 1v8 supply */ + int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata, + bool enable); /* Control CLK32K */ + const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */ + const char *sdd_file; /* if NULL, will use default for detected hw type */ +}; + + +/* An example of SPI support in your board setup file: + + static struct cw1200_platform_data_spi cw1200_platform_data = { + .ref_clk = 38400, + .spi_bits_per_word = 16, + .reset = GPIO_RF_RESET, + .powerup = GPIO_RF_POWERUP, + .macaddr = wifi_mac_addr, + .sdd_file = "sdd_sagrad_1091_1098.bin", + }; + static struct spi_board_info myboard_spi_devices[] __initdata = { + { + .modalias = "cw1200_wlan_spi", + .max_speed_hz = 52000000, + .bus_num = 0, + .irq = WIFI_IRQ, + .platform_data = &cw1200_platform_data, + .chip_select = 0, + }, + }; + + */ + +/* An example of SDIO support in your board setup file: + + static struct cw1200_platform_data_sdio my_cw1200_platform_data = { + .ref_clk = 38400, + .have_5ghz = false, + .sdd_file = "sdd_myplatform.bin", + }; + cw1200_sdio_set_platform_data(&my_cw1200_platform_data); + + */ + +void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata); + +#endif /* CW1200_PLAT_H_INCLUDED */ diff --git a/include/linux/platform_data/pwm-renesas-tpu.h b/include/linux/platform_data/pwm-renesas-tpu.h new file mode 100644 index 0000000..a7220b1 --- /dev/null +++ b/include/linux/platform_data/pwm-renesas-tpu.h @@ -0,0 +1,16 @@ +#ifndef __PWM_RENESAS_TPU_H__ +#define __PWM_RENESAS_TPU_H__ + +#include <linux/pwm.h> + +#define TPU_CHANNEL_MAX 4 + +struct tpu_pwm_channel_data { + enum pwm_polarity polarity; +}; + +struct tpu_pwm_platform_data { + struct tpu_pwm_channel_data channels[TPU_CHANNEL_MAX]; +}; + +#endif /* __PWM_RENESAS_TPU_H__ */ diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h new file mode 100644 index 0000000..80587fd --- /dev/null +++ b/include/linux/platform_data/rcar-du.h @@ -0,0 +1,54 @@ +/* + * rcar_du.h -- R-Car Display Unit DRM driver + * + * Copyright (C) 2013 Renesas Corporation + * + * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __RCAR_DU_H__ +#define __RCAR_DU_H__ + +#include <drm/drm_mode.h> + +enum rcar_du_encoder_type { + RCAR_DU_ENCODER_UNUSED = 0, + RCAR_DU_ENCODER_VGA, + RCAR_DU_ENCODER_LVDS, +}; + +struct rcar_du_panel_data { + unsigned int width_mm; /* Panel width in mm */ + unsigned int height_mm; /* Panel height in mm */ + struct drm_mode_modeinfo mode; +}; + +struct rcar_du_encoder_lvds_data { + struct rcar_du_panel_data panel; +}; + +struct rcar_du_encoder_vga_data { + /* TODO: Add DDC information for EDID retrieval */ +}; + +struct rcar_du_encoder_data { + enum rcar_du_encoder_type encoder; + unsigned int output; + + union { + struct rcar_du_encoder_lvds_data lvds; + struct rcar_du_encoder_vga_data vga; + } u; +}; + +struct rcar_du_platform_data { + struct rcar_du_encoder_data *encoders; + unsigned int num_encoders; +}; + +#endif /* __RCAR_DU_H__ */ diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h index 3c1c644..bfbd12b 100644 --- a/include/linux/platform_data/remoteproc-omap.h +++ b/include/linux/platform_data/remoteproc-omap.h @@ -50,7 +50,7 @@ void __init omap_rproc_reserve_cma(void); #else -void __init omap_rproc_reserve_cma(void) +static inline void __init omap_rproc_reserve_cma(void) { } diff --git a/include/linux/platform_data/ti_am335x_adc.h b/include/linux/platform_data/ti_am335x_adc.h deleted file mode 100644 index e41d583..0000000 --- a/include/linux/platform_data/ti_am335x_adc.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef __LINUX_TI_AM335X_ADC_H -#define __LINUX_TI_AM335X_ADC_H - -/** - * struct adc_data ADC Input information - * @adc_channels: Number of analog inputs - * available for ADC. - */ - -struct adc_data { - unsigned int adc_channels; -}; - -#endif diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index cd46ee5..ce8e4ff 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -180,6 +180,9 @@ struct platform_driver { const struct platform_device_id *id_table; }; +#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \ + driver)) + /* * use a macro to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 569781f..a0f7080 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -36,8 +36,8 @@ * @last_time: Monotonic clock when the wakeup source's was touched last time. * @prevent_sleep_time: Total time this source has been preventing autosleep. * @event_count: Number of signaled wakeup events. - * @active_count: Number of times the wakeup sorce was activated. - * @relax_count: Number of times the wakeup sorce was deactivated. + * @active_count: Number of times the wakeup source was activated. + * @relax_count: Number of times the wakeup source was deactivated. * @expire_count: Number of times the wakeup source's timeout has expired. * @wakeup_count: Number of times the wakeup source might abort suspend. * @active: Status of the wakeup source. 
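[Editorial aside] As a purely illustrative sketch of how the new R-Car DU platform data introduced above could be wired up from a board file: the panel geometry and mode timings below are invented placeholder numbers and the variable names are hypothetical, not taken from any in-tree board.

/*
 * Hypothetical board-file sketch for the new rcar-du platform data.
 * All geometry and timing values are illustrative placeholders.
 */
#include <linux/kernel.h>
#include <linux/platform_data/rcar-du.h>

static struct rcar_du_encoder_data du_encoders[] = {
	{
		.encoder = RCAR_DU_ENCODER_LVDS,
		.output = 0,
		.u.lvds.panel = {
			.width_mm = 210,		/* placeholder */
			.height_mm = 158,		/* placeholder */
			.mode = {
				.clock = 65000,		/* kHz, placeholder */
				.hdisplay = 1024,
				.hsync_start = 1048,
				.hsync_end = 1184,
				.htotal = 1344,
				.vdisplay = 768,
				.vsync_start = 771,
				.vsync_end = 777,
				.vtotal = 806,
			},
		},
	},
};

static const struct rcar_du_platform_data du_pdata = {
	.encoders = du_encoders,
	.num_encoders = ARRAY_SIZE(du_encoders),
};

A board would then pass du_pdata as the platform_data of its DU platform device, in the same way the cw1200 examples embedded in net-cw1200.h above pass their platform data structures.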
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 7794d75..907f3fd 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -7,14 +7,20 @@ #include <linux/timex.h> #include <linux/alarmtimer.h> -union cpu_time_count { - cputime_t cpu; - unsigned long long sched; -}; + +static inline unsigned long long cputime_to_expires(cputime_t expires) +{ + return (__force unsigned long long)expires; +} + +static inline cputime_t expires_to_cputime(unsigned long long expires) +{ + return (__force cputime_t)expires; +} struct cpu_timer_list { struct list_head entry; - union cpu_time_count expires, incr; + unsigned long long expires, incr; struct task_struct *task; int firing; }; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 3828cef..804b906 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -162,6 +162,8 @@ union power_supply_propval { const char *strval; }; +struct device_node; + struct power_supply { const char *name; enum power_supply_type type; @@ -173,9 +175,7 @@ struct power_supply { char **supplied_from; size_t num_supplies; -#ifdef CONFIG_OF struct device_node *of_node; -#endif int (*get_property)(struct power_supply *psy, enum power_supply_property psp, diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 89573a3..07d0df6 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -142,9 +142,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) { INIT_LIST_HEAD(&child->ptrace_entry); INIT_LIST_HEAD(&child->ptraced); -#ifdef CONFIG_HAVE_HW_BREAKPOINT - atomic_set(&child->ptrace_bp_refcnt, 1); -#endif child->jobctl = 0; child->ptrace = 0; child->parent = child->real_parent; @@ -351,11 +348,4 @@ extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); -#ifdef CONFIG_HAVE_HW_BREAKPOINT -extern int ptrace_get_breakpoints(struct task_struct *tsk); -extern void ptrace_put_breakpoints(struct task_struct *tsk); -#else -static inline void ptrace_put_breakpoints(struct task_struct *tsk) { } -#endif /* CONFIG_HAVE_HW_BREAKPOINT */ - #endif diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h index 0ca7582..a71d2db 100644 --- a/include/linux/pvclock_gtod.h +++ b/include/linux/pvclock_gtod.h @@ -3,6 +3,13 @@ #include <linux/notifier.h> +/* + * The pvclock gtod notifier is called when the system time is updated + * and is used to keep guest time synchronized with host time. + * + * The 'action' parameter in the notifier function is false (0), or + * true (non-zero) if system time was stepped. + */ extern int pvclock_gtod_register_notifier(struct notifier_block *nb); extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index a4df204..f0feafd 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -76,6 +76,7 @@ enum pwm_polarity { enum { PWMF_REQUESTED = 1 << 0, PWMF_ENABLED = 1 << 1, + PWMF_EXPORTED = 1 << 2, }; struct pwm_device { @@ -86,7 +87,9 @@ struct pwm_device { struct pwm_chip *chip; void *chip_data; - unsigned int period; /* in nanoseconds */ + unsigned int period; /* in nanoseconds */ + unsigned int duty_cycle; /* in nanoseconds */ + enum pwm_polarity polarity; }; static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) @@ -100,6 +103,17 @@ static inline unsigned int pwm_get_period(struct pwm_device *pwm) return pwm ? 
pwm->period : 0; } +static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) +{ + if (pwm) + pwm->duty_cycle = duty; +} + +static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm) +{ + return pwm ? pwm->duty_cycle : 0; +} + /* * pwm_set_polarity - configure the polarity of a PWM signal */ @@ -278,4 +292,17 @@ static inline void pwm_add_table(struct pwm_lookup *table, size_t num) } #endif +#ifdef CONFIG_PWM_SYSFS +void pwmchip_sysfs_export(struct pwm_chip *chip); +void pwmchip_sysfs_unexport(struct pwm_chip *chip); +#else +static inline void pwmchip_sysfs_export(struct pwm_chip *chip) +{ +} + +static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) +{ +} +#endif /* CONFIG_PWM_SYSFS */ + #endif /* __LINUX_PWM_H */ diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 23b3630..8e00f9f 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -10,6 +10,31 @@ #define SYS_HALT 0x0002 /* Notify of system halt */ #define SYS_POWER_OFF 0x0003 /* Notify of system power off */ +enum reboot_mode { + REBOOT_COLD = 0, + REBOOT_WARM, + REBOOT_HARD, + REBOOT_SOFT, + REBOOT_GPIO, +}; +extern enum reboot_mode reboot_mode; + +enum reboot_type { + BOOT_TRIPLE = 't', + BOOT_KBD = 'k', + BOOT_BIOS = 'b', + BOOT_ACPI = 'a', + BOOT_EFI = 'e', + BOOT_CF9 = 'p', + BOOT_CF9_COND = 'q', +}; +extern enum reboot_type reboot_type; + +extern int reboot_default; +extern int reboot_cpu; +extern int reboot_force; + + extern int register_reboot_notifier(struct notifier_block *); extern int unregister_reboot_notifier(struct notifier_block *); @@ -26,7 +51,7 @@ extern void machine_shutdown(void); struct pt_regs; extern void machine_crash_shutdown(struct pt_regs *); -/* +/* * Architecture independent implemenations of sys_reboot commands. */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 75981d0..580a532 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -15,6 +15,7 @@ #include <linux/list.h> #include <linux/rbtree.h> +#include <linux/err.h> struct module; struct device; diff --git a/include/linux/reservation.h b/include/linux/reservation.h new file mode 100644 index 0000000..813dae9 --- /dev/null +++ b/include/linux/reservation.h @@ -0,0 +1,62 @@ +/* + * Header file for reservations for dma-buf and ttm + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Copyright (C) 2012-2013 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark <rob.clark@linaro.org> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * Thomas Hellstrom <thellstrom-at-vmware-dot-com> + * + * Based on bo.c which bears the following copyright notice, + * but is dual licensed: + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _LINUX_RESERVATION_H +#define _LINUX_RESERVATION_H + +#include <linux/ww_mutex.h> + +extern struct ww_class reservation_ww_class; + +struct reservation_object { + struct ww_mutex lock; +}; + +static inline void +reservation_object_init(struct reservation_object *obj) +{ + ww_mutex_init(&obj->lock, &reservation_ww_class); +} + +static inline void +reservation_object_fini(struct reservation_object *obj) +{ + ww_mutex_destroy(&obj->lock); +} + +#endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 2680677..adae88f 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -244,6 +244,11 @@ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen); +size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip); +size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, + void *buf, size_t buflen, off_t skip); + /* * Maximum number of entries that will be allocated in one piece, if * a list larger than this is required then chaining will be utilized. diff --git a/include/linux/sched.h b/include/linux/sched.h index cdd5407..d722490 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -322,8 +322,6 @@ extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern void arch_unmap_area(struct mm_struct *, unsigned long); -extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); #else static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} #endif @@ -1401,9 +1399,6 @@ struct task_struct { } memcg_batch; unsigned int memcg_kmem_skip_account; #endif -#ifdef CONFIG_HAVE_HW_BREAKPOINT - atomic_t ptrace_bp_refcnt; -#endif #ifdef CONFIG_UPROBES struct uprobe_task *utask; #endif @@ -1633,6 +1628,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ +#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */ /* * Only the _current_ task can read/write to tsk->flags, but other @@ -2437,6 +2433,15 @@ extern int __cond_resched_softirq(void); __cond_resched_softirq(); \ }) +static inline void cond_resched_rcu(void) +{ +#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) + rcu_read_unlock(); + cond_resched(); + rcu_read_lock(); +#endif +} + /* * Does a critical section need to be broken due to another * task waiting?: (technically does not depend on CONFIG_PREEMPT, diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h new file mode 100644 index 0000000..fa7922c --- /dev/null +++ 
b/include/linux/sched_clock.h @@ -0,0 +1,21 @@ +/* + * sched_clock.h: support for extending counters to full 64-bit ns counter + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef LINUX_SCHED_CLOCK +#define LINUX_SCHED_CLOCK + +#ifdef CONFIG_GENERIC_SCHED_CLOCK +extern void sched_clock_postinit(void); +#else +static inline void sched_clock_postinit(void) { } +#endif + +extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); + +extern unsigned long long (*sched_clock_func)(void); + +#endif diff --git a/include/linux/security.h b/include/linux/security.h index 40560f4..7ce53ae 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -26,6 +26,7 @@ #include <linux/capability.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/string.h> struct linux_binprm; struct cred; @@ -60,6 +61,9 @@ struct mm_struct; #define SECURITY_CAP_NOAUDIT 0 #define SECURITY_CAP_AUDIT 1 +/* LSM Agnostic defines for sb_set_mnt_opts */ +#define SECURITY_LSM_NATIVE_LABELS 1 + struct ctl_table; struct audit_krule; struct user_namespace; @@ -306,6 +310,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Parse a string of security data filling in the opts structure * @options string containing all mount options known by the LSM * @opts binary data structure usable by the LSM + * @dentry_init_security: + * Compute a context for a dentry as the inode is not yet available + * since NFSv4 has no label backed by an EA anyway. + * @dentry dentry to use in calculating the context. + * @mode mode used to determine resource type. + * @name name of the last path component used to create file + * @ctx pointer to place the pointer to the resulting context in. + * @ctxlen point to place the length of the resulting context. + * * * Security hooks for inode operations. * @@ -1313,6 +1326,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @pages contains the number of pages. * Return 0 if permission is granted. * + * @ismaclabel: + * Check if the extended attribute specified by @name + * represents a MAC label. Returns 1 if name is a MAC + * attribute otherwise returns 0. + * @name full extended attribute name to check against + * LSM as a MAC label. + * * @secid_to_secctx: * Convert secid to security context. If secdata is NULL the length of * the result will be returned in seclen, but no secdata will be returned. 
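[Editorial aside] The hook documentation above says dentry_init_security exists so a filesystem can obtain a context before the inode is available (the NFSv4 case). A minimal sketch of a caller, assuming a hypothetical helper name and error handling that are not taken from any in-tree user:

/*
 * Illustrative only: ask the LSM for an initial security context for an
 * object that is about to be created remotely, before its inode exists.
 * The helper name and -EOPNOTSUPP fallback are assumptions for this sketch.
 */
#include <linux/dcache.h>
#include <linux/security.h>

static int example_label_new_object(struct dentry *dentry, umode_t mode,
				    void **ctx, u32 *ctxlen)
{
	int err;

	err = security_dentry_init_security(dentry, mode, &dentry->d_name,
					    ctx, ctxlen);
	if (err == -EOPNOTSUPP) {
		/* No LSM labelling support configured: carry on unlabelled. */
		*ctx = NULL;
		*ctxlen = 0;
		return 0;
	}
	return err;
}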
@@ -1440,10 +1460,16 @@ struct security_operations { int (*sb_pivotroot) (struct path *old_path, struct path *new_path); int (*sb_set_mnt_opts) (struct super_block *sb, - struct security_mnt_opts *opts); + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags); int (*sb_clone_mnt_opts) (const struct super_block *oldsb, struct super_block *newsb); int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); + int (*dentry_init_security) (struct dentry *dentry, int mode, + struct qstr *name, void **ctx, + u32 *ctxlen); + #ifdef CONFIG_SECURITY_PATH int (*path_unlink) (struct path *dir, struct dentry *dentry); @@ -1591,6 +1617,7 @@ struct security_operations { int (*getprocattr) (struct task_struct *p, char *name, char **value); int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size); + int (*ismaclabel) (const char *name); int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen); int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid); void (*release_secctx) (char *secdata, u32 seclen); @@ -1726,10 +1753,16 @@ int security_sb_mount(const char *dev_name, struct path *path, const char *type, unsigned long flags, void *data); int security_sb_umount(struct vfsmount *mnt, int flags); int security_sb_pivotroot(struct path *old_path, struct path *new_path); -int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts); +int security_sb_set_mnt_opts(struct super_block *sb, + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags); int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb); int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); +int security_dentry_init_security(struct dentry *dentry, int mode, + struct qstr *name, void **ctx, + u32 *ctxlen); int security_inode_alloc(struct inode *inode); void security_inode_free(struct inode *inode); @@ -1841,6 +1874,7 @@ void security_d_instantiate(struct dentry *dentry, struct inode *inode); int security_getprocattr(struct task_struct *p, char *name, char **value); int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); +int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); @@ -2012,7 +2046,9 @@ static inline int security_sb_pivotroot(struct path *old_path, } static inline int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts) + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags) { return 0; } @@ -2036,6 +2072,16 @@ static inline int security_inode_alloc(struct inode *inode) static inline void security_inode_free(struct inode *inode) { } +static inline int security_dentry_init_security(struct dentry *dentry, + int mode, + struct qstr *name, + void **ctx, + u32 *ctxlen) +{ + return -EOPNOTSUPP; +} + + static inline int security_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, @@ -2521,6 +2567,11 @@ static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb) return cap_netlink_send(sk, skb); } +static inline int security_ismaclabel(const char *name) +{ + return 0; +} + static inline int security_secid_to_secctx(u32 secid, char 
**secdata, u32 *seclen) { return -EOPNOTSUPP; diff --git a/include/linux/sem.h b/include/linux/sem.h index 53d4265..976ce3a 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h @@ -12,10 +12,12 @@ struct task_struct; struct sem_array { struct kern_ipc_perm ____cacheline_aligned_in_smp sem_perm; /* permissions .. see ipc.h */ - time_t sem_otime; /* last semop time */ time_t sem_ctime; /* last change time */ struct sem *sem_base; /* ptr to first semaphore in array */ - struct list_head sem_pending; /* pending operations to be processed */ + struct list_head pending_alter; /* pending operations */ + /* that alter the array */ + struct list_head pending_const; /* pending complex operations */ + /* that do not alter semvals */ struct list_head list_id; /* undo requests on this array */ int sem_nsems; /* no. of semaphores in array */ int complex_count; /* pending complex operations */ diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 2da29ac..4e32edc 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -173,4 +173,10 @@ extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, extern struct hlist_node *seq_hlist_next_rcu(void *v, struct hlist_head *head, loff_t *ppos); + +/* Helpers for iterating over per-cpu hlist_head-s in seq_files */ +extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos); + +extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); + #endif diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index b64d6be..4e83f3e 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -99,6 +99,4 @@ struct sh_dmae_pdata { #define CHCR_TE 0x00000002 #define CHCR_IE 0x00000004 -bool shdma_chan_filter(struct dma_chan *chan, void *arg); - #endif diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index a3728bf..5b1c984 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -68,6 +68,8 @@ struct shdma_chan { int id; /* Raw id of this channel */ int irq; /* Channel IRQ */ int slave_id; /* Client ID for slave DMA */ + int hw_req; /* DMA request line for slave DMA - same + * as MID/RID, used with DT */ enum shdma_pm_state pm_state; }; @@ -122,5 +124,10 @@ void shdma_chan_remove(struct shdma_chan *schan); int shdma_init(struct device *dev, struct shdma_dev *sdev, int chan_num); void shdma_cleanup(struct shdma_dev *sdev); +#if IS_ENABLED(CONFIG_SH_DMAE_BASE) +bool shdma_chan_filter(struct dma_chan *chan, void *arg); +#else +#define shdma_chan_filter NULL +#endif #endif diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dec1748..3b71a4e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -319,6 +319,8 @@ enum { SKB_GSO_GRE = 1 << 6, SKB_GSO_UDP_TUNNEL = 1 << 7, + + SKB_GSO_MPLS = 1 << 8, }; #if BITS_PER_LONG > 32 @@ -384,11 +386,13 @@ typedef unsigned char *sk_buff_data_t; * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @dma_cookie: a cookie to one of several possible DMA operations * done by skb DMA functions + * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking * @mark: Generic packet mark * @dropcount: total number of sk_receive_queue overflows * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information + * @inner_protocol: Protocol (encapsulation) * @inner_transport_header: Inner transport layer header (encapsulation) * @inner_network_header: Network layer header 
(encapsulation) * @inner_mac_header: Link layer header (encapsulation) @@ -497,8 +501,11 @@ struct sk_buff { /* 7/9 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); -#ifdef CONFIG_NET_DMA - dma_cookie_t dma_cookie; +#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL + union { + unsigned int napi_id; + dma_cookie_t dma_cookie; + }; #endif #ifdef CONFIG_NETWORK_SECMARK __u32 secmark; @@ -509,12 +516,13 @@ struct sk_buff { __u32 reserved_tailroom; }; - sk_buff_data_t inner_transport_header; - sk_buff_data_t inner_network_header; - sk_buff_data_t inner_mac_header; - sk_buff_data_t transport_header; - sk_buff_data_t network_header; - sk_buff_data_t mac_header; + __be16 inner_protocol; + __u16 inner_transport_header; + __u16 inner_network_header; + __u16 inner_mac_header; + __u16 transport_header; + __u16 network_header; + __u16 mac_header; /* These elements must be at the end, see alloc_skb() for details. */ sk_buff_data_t tail; sk_buff_data_t end; @@ -1388,6 +1396,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) skb_reset_tail_pointer(skb); skb->tail += offset; } + #else /* NET_SKBUFF_DATA_USES_OFFSET */ static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) { @@ -1528,7 +1537,6 @@ static inline void skb_reset_mac_len(struct sk_buff *skb) skb->mac_len = skb->network_header - skb->mac_header; } -#ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_inner_transport_header(const struct sk_buff *skb) { @@ -1582,7 +1590,7 @@ static inline void skb_set_inner_mac_header(struct sk_buff *skb, } static inline bool skb_transport_header_was_set(const struct sk_buff *skb) { - return skb->transport_header != ~0U; + return skb->transport_header != (typeof(skb->transport_header))~0U; } static inline unsigned char *skb_transport_header(const struct sk_buff *skb) @@ -1625,7 +1633,7 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb) static inline int skb_mac_header_was_set(const struct sk_buff *skb) { - return skb->mac_header != ~0U; + return skb->mac_header != (typeof(skb->mac_header))~0U; } static inline void skb_reset_mac_header(struct sk_buff *skb) @@ -1639,112 +1647,6 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) skb->mac_header += offset; } -#else /* NET_SKBUFF_DATA_USES_OFFSET */ -static inline unsigned char *skb_inner_transport_header(const struct sk_buff - *skb) -{ - return skb->inner_transport_header; -} - -static inline void skb_reset_inner_transport_header(struct sk_buff *skb) -{ - skb->inner_transport_header = skb->data; -} - -static inline void skb_set_inner_transport_header(struct sk_buff *skb, - const int offset) -{ - skb->inner_transport_header = skb->data + offset; -} - -static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) -{ - return skb->inner_network_header; -} - -static inline void skb_reset_inner_network_header(struct sk_buff *skb) -{ - skb->inner_network_header = skb->data; -} - -static inline void skb_set_inner_network_header(struct sk_buff *skb, - const int offset) -{ - skb->inner_network_header = skb->data + offset; -} - -static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) -{ - return skb->inner_mac_header; -} - -static inline void skb_reset_inner_mac_header(struct sk_buff *skb) -{ - skb->inner_mac_header = skb->data; -} - -static inline void skb_set_inner_mac_header(struct sk_buff *skb, - const int offset) -{ - skb->inner_mac_header = skb->data + offset; -} -static 
inline bool skb_transport_header_was_set(const struct sk_buff *skb) -{ - return skb->transport_header != NULL; -} - -static inline unsigned char *skb_transport_header(const struct sk_buff *skb) -{ - return skb->transport_header; -} - -static inline void skb_reset_transport_header(struct sk_buff *skb) -{ - skb->transport_header = skb->data; -} - -static inline void skb_set_transport_header(struct sk_buff *skb, - const int offset) -{ - skb->transport_header = skb->data + offset; -} - -static inline unsigned char *skb_network_header(const struct sk_buff *skb) -{ - return skb->network_header; -} - -static inline void skb_reset_network_header(struct sk_buff *skb) -{ - skb->network_header = skb->data; -} - -static inline void skb_set_network_header(struct sk_buff *skb, const int offset) -{ - skb->network_header = skb->data + offset; -} - -static inline unsigned char *skb_mac_header(const struct sk_buff *skb) -{ - return skb->mac_header; -} - -static inline int skb_mac_header_was_set(const struct sk_buff *skb) -{ - return skb->mac_header != NULL; -} - -static inline void skb_reset_mac_header(struct sk_buff *skb) -{ - skb->mac_header = skb->data; -} - -static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) -{ - skb->mac_header = skb->data + offset; -} -#endif /* NET_SKBUFF_DATA_USES_OFFSET */ - static inline void skb_probe_transport_header(struct sk_buff *skb, const int offset_hint) { @@ -2483,6 +2385,7 @@ extern void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); +extern void skb_scrub_packet(struct sk_buff *skb); extern struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/include/linux/slab.h b/include/linux/slab.h index 0c62175..6c5cc0e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -169,11 +169,7 @@ struct kmem_cache { struct list_head list; /* List of all slab caches on the system */ }; -#define KMALLOC_MAX_SIZE (1UL << 30) - -#include <linux/slob_def.h> - -#else /* CONFIG_SLOB */ +#endif /* CONFIG_SLOB */ /* * Kmalloc array related definitions @@ -195,7 +191,9 @@ struct kmem_cache { #ifndef KMALLOC_SHIFT_LOW #define KMALLOC_SHIFT_LOW 5 #endif -#else +#endif + +#ifdef CONFIG_SLUB /* * SLUB allocates up to order 2 pages directly and otherwise * passes the request to the page allocator. @@ -207,6 +205,19 @@ struct kmem_cache { #endif #endif +#ifdef CONFIG_SLOB +/* + * SLOB passes all page size and larger requests to the page allocator. + * No kmalloc array is necessary since objects of different sizes can + * be allocated from the same page. + */ +#define KMALLOC_SHIFT_MAX 30 +#define KMALLOC_SHIFT_HIGH PAGE_SHIFT +#ifndef KMALLOC_SHIFT_LOW +#define KMALLOC_SHIFT_LOW 3 +#endif +#endif + /* Maximum allocatable size */ #define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX) /* Maximum size for which we actually use a slab cache */ @@ -221,6 +232,7 @@ struct kmem_cache { #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW) #endif +#ifndef CONFIG_SLOB extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1]; #ifdef CONFIG_ZONE_DMA extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1]; @@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size) /* Will never be reached. 
Needed because the compiler may complain */ return -1; } +#endif /* !CONFIG_SLOB */ #ifdef CONFIG_SLAB #include <linux/slab_def.h> -#elif defined(CONFIG_SLUB) +#endif + +#ifdef CONFIG_SLUB #include <linux/slub_def.h> -#else -#error "Unknown slab allocator" +#endif + +#ifdef CONFIG_SLOB +#include <linux/slob_def.h> #endif /* @@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size) */ static __always_inline int kmalloc_size(int n) { +#ifndef CONFIG_SLOB if (n > 2) return 1 << n; @@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n) if (n == 2 && KMALLOC_MIN_SIZE <= 64) return 192; - +#endif return 0; } -#endif /* !CONFIG_SLOB */ /* * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. @@ -356,9 +373,8 @@ int cache_show(struct kmem_cache *s, struct seq_file *m); void print_slabinfo_header(struct seq_file *m); /** - * kmalloc_array - allocate memory for an array. - * @n: number of elements. - * @size: element size. + * kmalloc - allocate memory + * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. * * The @flags argument may be one of: @@ -405,6 +421,17 @@ void print_slabinfo_header(struct seq_file *m); * There are other flags available as well, but these are not intended * for general use, and so are not documented here. For a full list of * potential flags, always refer to linux/gfp.h. + * + * kmalloc is the normal method of allocating memory + * in the kernel. + */ +static __always_inline void *kmalloc(size_t size, gfp_t flags); + +/** + * kmalloc_array - allocate memory for an array. + * @n: number of elements. + * @size: element size. + * @flags: the type of memory to allocate (see kmalloc). */ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { @@ -428,7 +455,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) /** * kmalloc_node - allocate memory from a specific node * @size: how many bytes of memory are required. - * @flags: the type of memory to allocate (see kcalloc). + * @flags: the type of memory to allocate (see kmalloc). * @node: node to allocate from. * * kmalloc() for non-local nodes, used to allocate from a specific node diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index f28e14a..095a5a4 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h @@ -18,14 +18,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -/** - * kmalloc - allocate memory - * @size: how many bytes of memory are required. - * @flags: the type of memory to allocate (see kcalloc). - * - * kmalloc is the normal method of allocating memory - * in the kernel. 
- */ static __always_inline void *kmalloc(size_t size, gfp_t flags) { return __kmalloc_node(size, flags, NUMA_NO_NODE); diff --git a/include/linux/socket.h b/include/linux/socket.h index b10ce4b..230c04b 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -167,6 +167,7 @@ struct ucred { #define AF_PPPOX 24 /* PPPoX sockets */ #define AF_WANPIPE 25 /* Wanpipe API Sockets */ #define AF_LLC 26 /* Linux LLC */ +#define AF_IB 27 /* Native InfiniBand address */ #define AF_CAN 29 /* Controller Area Network */ #define AF_TIPC 30 /* TIPC sockets */ #define AF_BLUETOOTH 31 /* Bluetooth sockets */ @@ -211,6 +212,7 @@ struct ucred { #define PF_PPPOX AF_PPPOX #define PF_WANPIPE AF_WANPIPE #define PF_LLC AF_LLC +#define PF_IB AF_IB #define PF_CAN AF_CAN #define PF_TIPC AF_TIPC #define PF_BLUETOOTH AF_BLUETOOTH diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 51df117..bdb9993 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -144,7 +144,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); } -#endif /* CONFIG_PREEMPT */ +#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ static inline void __raw_spin_unlock(raw_spinlock_t *lock) { diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h index afe79d4..6535e47 100644 --- a/include/linux/ssb/ssb_driver_mips.h +++ b/include/linux/ssb/ssb_driver_mips.h @@ -20,6 +20,18 @@ struct ssb_pflash { u32 window_size; }; +#ifdef CONFIG_SSB_SFLASH +struct ssb_sflash { + bool present; + u32 window; + u32 blocksize; + u16 numblocks; + u32 size; + + void *priv; +}; +#endif + struct ssb_mipscore { struct ssb_device *dev; @@ -27,6 +39,9 @@ struct ssb_mipscore { struct ssb_serial_port serial_ports[4]; struct ssb_pflash pflash; +#ifdef CONFIG_SSB_SFLASH + struct ssb_sflash sflash; +#endif }; extern void ssb_mipscore_init(struct ssb_mipscore *mcore); diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index 3a72569..f9f931c 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -172,6 +172,7 @@ #define SSB_SPROMSIZE_WORDS_R4 220 #define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16)) #define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16)) +#define SSB_SPROMSIZE_WORDS_R10 230 #define SSB_SPROM_BASE1 0x1000 #define SSB_SPROM_BASE31 0x0800 #define SSB_SPROM_REVISION 0x007E diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c1b3ed3..9e495d31 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -80,6 +80,10 @@ struct stmmac_mdio_bus_data { unsigned int phy_mask; int *irqs; int probed_phy_irq; +#ifdef CONFIG_OF + int reset_gpio, active_low; + u32 delays[3]; +#endif }; struct stmmac_dma_cfg { diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 303399b..6ce690d 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h @@ -57,6 +57,7 @@ struct cache_head { #define CACHE_VALID 0 /* Entry contains valid data */ #define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */ #define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/ +#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */ #define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ @@ -148,6 +149,24 @@ struct cache_deferred_req { int too_many); }; +/* + * timestamps kept in the cache are expressed in 
seconds + * since boot. This is the best for measuring differences in + * real time. + */ +static inline time_t seconds_since_boot(void) +{ + struct timespec boot; + getboottime(&boot); + return get_seconds() - boot.tv_sec; +} + +static inline time_t convert_to_wallclock(time_t sinceboot) +{ + struct timespec boot; + getboottime(&boot); + return boot.tv_sec + sinceboot; +} extern const struct file_operations cache_file_operations_pipefs; extern const struct file_operations content_file_operations_pipefs; @@ -181,15 +200,10 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) kref_put(&h->ref, cd->cache_put); } -static inline int cache_valid(struct cache_head *h) +static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) { - /* If an item has been unhashed pending removal when - * the refcount drops to 0, the expiry_time will be - * set to 0. We don't want to consider such items - * valid in this context even though CACHE_VALID is - * set. - */ - return (h->expiry_time != 0 && test_bit(CACHE_VALID, &h->flags)); + return (h->expiry_time < seconds_since_boot()) || + (detail->flush_time > h->last_refresh); } extern int cache_check(struct cache_detail *detail, @@ -250,25 +264,6 @@ static inline int get_uint(char **bpp, unsigned int *anint) return 0; } -/* - * timestamps kept in the cache are expressed in seconds - * since boot. This is the best for measuring differences in - * real time. - */ -static inline time_t seconds_since_boot(void) -{ - struct timespec boot; - getboottime(&boot); - return get_seconds() - boot.tv_sec; -} - -static inline time_t convert_to_wallclock(time_t sinceboot) -{ - struct timespec boot; - getboottime(&boot); - return boot.tv_sec + sinceboot; -} - static inline time_t get_expiry(char **bpp) { int rv; diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 161463e..1f911cc 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -151,6 +151,8 @@ struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32); /* Fill in an array with a list of supported pseudoflavors */ int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int); +struct gss_api_mech * gss_mech_get(struct gss_api_mech *); + /* For every successful gss_mech_get or gss_mech_get_by_* call there must be a * corresponding call to gss_mech_put. 
*/ void gss_mech_put(struct gss_api_mech *); diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index a7b422b..aa5b582 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h @@ -73,12 +73,12 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *, extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *); struct rpc_clnt; -extern struct dentry *rpc_create_client_dir(struct dentry *, struct qstr *, struct rpc_clnt *); +extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *); extern int rpc_remove_client_dir(struct dentry *); struct cache_detail; extern struct dentry *rpc_create_cache_dir(struct dentry *, - struct qstr *, + const char *, umode_t umode, struct cache_detail *); extern void rpc_remove_cache_dir(struct dentry *); diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 84ca436..6d87035 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -88,15 +88,6 @@ struct rpc_task { tk_rebind_retry : 2; }; -/* support walking a list of tasks on a wait queue */ -#define task_for_each(task, pos, head) \ - list_for_each(pos, head) \ - if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1) - -#define task_for_first(task, head) \ - if (!list_empty(head) && \ - ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1)) - typedef void (*rpc_action)(struct rpc_task *); struct rpc_call_ops { @@ -238,7 +229,6 @@ struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *, bool (*)(struct rpc_task *, void *), void *); void rpc_wake_up_status(struct rpc_wait_queue *, int); -int rpc_queue_empty(struct rpc_wait_queue *); void rpc_delay(struct rpc_task *, unsigned long); void * rpc_malloc(struct rpc_task *, size_t); void rpc_free(void *); @@ -259,16 +249,6 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) return __rpc_wait_for_completion_task(task, NULL); } -static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio) -{ - task->tk_priority = prio - RPC_PRIORITY_LOW; -} - -static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio) -{ - return (task->tk_priority + RPC_PRIORITY_LOW == prio); -} - #if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) static inline const char * rpc_qname(const struct rpc_wait_queue *q) { diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index ff374ab..8d71d65 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -14,6 +14,7 @@ #include <linux/string.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/cache.h> +#include <linux/sunrpc/gss_api.h> #include <linux/hash.h> #include <linux/cred.h> @@ -23,13 +24,23 @@ struct svc_cred { struct group_info *cr_group_info; u32 cr_flavor; /* pseudoflavor */ char *cr_principal; /* for gss */ + struct gss_api_mech *cr_gss_mech; }; +static inline void init_svc_cred(struct svc_cred *cred) +{ + cred->cr_group_info = NULL; + cred->cr_principal = NULL; + cred->cr_gss_mech = NULL; +} + static inline void free_svc_cred(struct svc_cred *cred) { if (cred->cr_group_info) put_group_info(cred->cr_group_info); kfree(cred->cr_principal); + gss_mech_put(cred->cr_gss_mech); + init_svc_cred(cred); } struct svc_rqst; /* forward decl */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index e2cee22..9e8a9b5 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -17,10 +17,12 @@ #include <linux/list.h> 
#include <linux/lockdep.h> #include <linux/kobject_ns.h> +#include <linux/stat.h> #include <linux/atomic.h> struct kobject; struct module; +struct bin_attribute; enum kobj_ns_type; struct attribute { @@ -59,26 +61,28 @@ struct attribute_group { umode_t (*is_visible)(struct kobject *, struct attribute *, int); struct attribute **attrs; + struct bin_attribute **bin_attrs; }; - - /** * Use these macros to make defining attributes easier. See include/linux/device.h * for examples.. */ -#define __ATTR(_name,_mode,_show,_store) { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - .store = _store, \ +#define __ATTR(_name,_mode,_show,_store) { \ + .attr = {.name = __stringify(_name), .mode = _mode }, \ + .show = _show, \ + .store = _store, \ } -#define __ATTR_RO(_name) { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = _name##_show, \ +#define __ATTR_RO(_name) { \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .show = _name##_show, \ } +#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ + _name##_show, _name##_store) + #define __ATTR_NULL { .attr = { .name = NULL } } #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -92,6 +96,18 @@ struct attribute_group { #define __ATTR_IGNORE_LOCKDEP __ATTR #endif +#define __ATTRIBUTE_GROUPS(_name) \ +static const struct attribute_group *_name##_groups[] = { \ + &_name##_group, \ + NULL, \ +} + +#define ATTRIBUTE_GROUPS(_name) \ +static const struct attribute_group _name##_group = { \ + .attrs = _name##_attrs, \ +}; \ +__ATTRIBUTE_GROUPS(_name) + #define attr_name(_attr) (_attr).attr.name struct file; @@ -121,6 +137,36 @@ struct bin_attribute { */ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) +/* macros to create static binary attributes easier */ +#define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ + .attr = { .name = __stringify(_name), .mode = _mode }, \ + .read = _read, \ + .write = _write, \ + .size = _size, \ +} + +#define __BIN_ATTR_RO(_name, _size) { \ + .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ + .read = _name##_read, \ + .size = _size, \ +} + +#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \ + (S_IWUSR | S_IRUGO), _name##_read, \ + _name##_write) + +#define __BIN_ATTR_NULL __ATTR_NULL + +#define BIN_ATTR(_name, _mode, _read, _write, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \ + _write, _size) + +#define BIN_ATTR_RO(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size) + +#define BIN_ATTR_RW(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) + struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *,char *); ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 5adbc33..472120b 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -246,7 +246,6 @@ struct tcp_sock { /* from STCP, retrans queue hinting */ struct sk_buff* lost_skb_hint; - struct sk_buff *scoreboard_skb_hint; struct sk_buff *retransmit_skb_hint; struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ diff --git a/include/linux/tick.h b/include/linux/tick.h index 9180f4b..62bd8b7 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { } #endif -# ifdef CONFIG_CPU_IDLE_GOV_MENU -extern void menu_hrtimer_cancel(void); -# else -static inline void 
menu_hrtimer_cancel(void) {} -# endif /* CONFIG_CPU_IDLE_GOV_MENU */ - #endif diff --git a/include/linux/usb.h b/include/linux/usb.h index a232b7e..0eec268 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -367,17 +367,6 @@ struct usb_bus { /* ----------------------------------------------------------------------- */ -/* This is arbitrary. - * From USB 2.0 spec Table 11-13, offset 7, a hub can - * have up to 255 ports. The most yet reported is 10. - * - * Current Wireless USB host hardware (Intel i1480 for example) allows - * up to 22 devices to connect. Upcoming hardware might raise that - * limit. Because the arrays need to add a bit for hub status data, we - * do 31, so plus one evens out to four bytes. - */ -#define USB_MAXCHILDREN (31) - struct usb_tt; enum usb_device_removable { diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index b6b215f..14105c2 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -23,6 +23,7 @@ struct user_namespace { struct uid_gid_map projid_map; atomic_t count; struct user_namespace *parent; + int level; kuid_t owner; kgid_t group; unsigned int proc_inum; diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index ea7168a..617c01b 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -15,6 +15,7 @@ #define _LINUX_VEXPRESS_H #include <linux/device.h> +#include <linux/reboot.h> #define VEXPRESS_SITE_MB 0 #define VEXPRESS_SITE_DB1 1 diff --git a/include/linux/virtio.h b/include/linux/virtio.h index e94c75d..36d36cc 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -63,6 +63,10 @@ void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); +unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); + +bool virtqueue_poll(struct virtqueue *vq, unsigned); + bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index ca3ad41..b300787 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -1,6 +1,7 @@ #ifndef _LINUX_VIRTIO_RING_H #define _LINUX_VIRTIO_RING_H +#include <asm/barrier.h> #include <linux/irqreturn.h> #include <uapi/linux/virtio_ring.h> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index dd0a2c8..4b8a891 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -10,12 +10,12 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */ /* bits in flags of vmalloc's vm_struct below */ -#define VM_IOREMAP 0x00000001 /* ioremap() and friends */ -#define VM_ALLOC 0x00000002 /* vmalloc() */ -#define VM_MAP 0x00000004 /* vmap()ed pages */ -#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ -#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ -#define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */ +#define VM_IOREMAP 0x00000001 /* ioremap() and friends */ +#define VM_ALLOC 0x00000002 /* vmalloc() */ +#define VM_MAP 0x00000004 /* vmap()ed pages */ +#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ +#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ +#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ /* bits [20..32] reserved for arch specific ioremap internals */ /* diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 76be077..7dc17e2 100644 --- a/include/linux/vmpressure.h +++ 
b/include/linux/vmpressure.h @@ -12,7 +12,7 @@ struct vmpressure { unsigned long scanned; unsigned long reclaimed; /* The lock is used to keep the scanned/reclaimed above in sync. */ - struct mutex sr_lock; + struct spinlock sr_lock; /* The list of vmpressure_event structs. */ struct list_head events; @@ -30,6 +30,7 @@ extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); extern void vmpressure_init(struct vmpressure *vmpr); +extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index abfe117..4e198ca 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -47,11 +47,16 @@ enum wb_reason { WB_REASON_LAPTOP_TIMER, WB_REASON_FREE_MORE_MEM, WB_REASON_FS_FREE_SPACE, + /* + * There is no bdi forker thread any more and works are done + * by emergency worker, however, this is TPs userland visible + * and we'll be exposing exactly the same information, + * so it has a mismatch name. + */ WB_REASON_FORKER_THREAD, WB_REASON_MAX, }; -extern const char *wb_reason_name[]; /* * A control structure which tells the writeback code what to do. These are @@ -95,7 +100,6 @@ int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr, void sync_inodes_sb(struct super_block *); long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, enum wb_reason reason); -long wb_do_writeback(struct bdi_writeback *wb, int force_wait); void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); void inode_wait_for_writeback(struct inode *inode); diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h new file mode 100644 index 0000000..760399a --- /dev/null +++ b/include/linux/ww_mutex.h @@ -0,0 +1,378 @@ +/* + * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance + * + * Original mutex implementation started by Ingo Molnar: + * + * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * + * Wound/wait implementation: + * Copyright (C) 2013 Canonical Ltd. + * + * This file contains the main data structure and API definitions. 
+ */ + +#ifndef __LINUX_WW_MUTEX_H +#define __LINUX_WW_MUTEX_H + +#include <linux/mutex.h> + +struct ww_class { + atomic_long_t stamp; + struct lock_class_key acquire_key; + struct lock_class_key mutex_key; + const char *acquire_name; + const char *mutex_name; +}; + +struct ww_acquire_ctx { + struct task_struct *task; + unsigned long stamp; + unsigned acquired; +#ifdef CONFIG_DEBUG_MUTEXES + unsigned done_acquire; + struct ww_class *ww_class; + struct ww_mutex *contending_lock; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH + unsigned deadlock_inject_interval; + unsigned deadlock_inject_countdown; +#endif +}; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +#ifdef CONFIG_DEBUG_MUTEXES + struct ww_class *ww_class; +#endif +}; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \ + , .ww_class = &ww_class +#else +# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) +#endif + +#define __WW_CLASS_INITIALIZER(ww_class) \ + { .stamp = ATOMIC_LONG_INIT(0) \ + , .acquire_name = #ww_class "_acquire" \ + , .mutex_name = #ww_class "_mutex" } + +#define __WW_MUTEX_INITIALIZER(lockname, class) \ + { .base = { \__MUTEX_INITIALIZER(lockname) } \ + __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } + +#define DEFINE_WW_CLASS(classname) \ + struct ww_class classname = __WW_CLASS_INITIALIZER(classname) + +#define DEFINE_WW_MUTEX(mutexname, ww_class) \ + struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) + +/** + * ww_mutex_init - initialize the w/w mutex + * @lock: the mutex to be initialized + * @ww_class: the w/w class the mutex should belong to + * + * Initialize the w/w mutex to unlocked state and associate it with the given + * class. + * + * It is not allowed to initialize an already locked mutex. + */ +static inline void ww_mutex_init(struct ww_mutex *lock, + struct ww_class *ww_class) +{ + __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); + lock->ctx = NULL; +#ifdef CONFIG_DEBUG_MUTEXES + lock->ww_class = ww_class; +#endif +} + +/** + * ww_acquire_init - initialize a w/w acquire context + * @ctx: w/w acquire context to initialize + * @ww_class: w/w class of the context + * + * Initializes an context to acquire multiple mutexes of the given w/w class. + * + * Context-based w/w mutex acquiring can be done in any order whatsoever within + * a given lock class. Deadlocks will be detected and handled with the + * wait/wound logic. + * + * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can + * result in undetected deadlocks and is so forbidden. Mixing different contexts + * for the same w/w class when acquiring mutexes can also result in undetected + * deadlocks, and is hence also forbidden. Both types of abuse will be caught by + * enabling CONFIG_PROVE_LOCKING. + * + * Nesting of acquire contexts for _different_ w/w classes is possible, subject + * to the usual locking rules between different lock classes. + * + * An acquire context must be released with ww_acquire_fini by the same task + * before the memory is freed. It is recommended to allocate the context itself + * on the stack. 
+ */ +static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, + struct ww_class *ww_class) +{ + ctx->task = current; + ctx->stamp = atomic_long_inc_return(&ww_class->stamp); + ctx->acquired = 0; +#ifdef CONFIG_DEBUG_MUTEXES + ctx->ww_class = ww_class; + ctx->done_acquire = 0; + ctx->contending_lock = NULL; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); + lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, + &ww_class->acquire_key, 0); + mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); +#endif +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH + ctx->deadlock_inject_interval = 1; + ctx->deadlock_inject_countdown = ctx->stamp & 0xf; +#endif +} + +/** + * ww_acquire_done - marks the end of the acquire phase + * @ctx: the acquire context + * + * Marks the end of the acquire phase, any further w/w mutex lock calls using + * this context are forbidden. + * + * Calling this function is optional, it is just useful to document w/w mutex + * code and clearly designated the acquire phase from actually using the locked + * data structures. + */ +static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) +{ +#ifdef CONFIG_DEBUG_MUTEXES + lockdep_assert_held(ctx); + + DEBUG_LOCKS_WARN_ON(ctx->done_acquire); + ctx->done_acquire = 1; +#endif +} + +/** + * ww_acquire_fini - releases a w/w acquire context + * @ctx: the acquire context to free + * + * Releases a w/w acquire context. This must be called _after_ all acquired w/w + * mutexes have been released with ww_mutex_unlock. + */ +static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) +{ +#ifdef CONFIG_DEBUG_MUTEXES + mutex_release(&ctx->dep_map, 0, _THIS_IP_); + + DEBUG_LOCKS_WARN_ON(ctx->acquired); + if (!config_enabled(CONFIG_PROVE_LOCKING)) + /* + * lockdep will normally handle this, + * but fail without anyway + */ + ctx->done_acquire = 1; + + if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC)) + /* ensure ww_acquire_fini will still fail if called twice */ + ctx->acquired = ~0U; +#endif +} + +extern int __must_check __ww_mutex_lock(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx); +extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx); + +/** + * ww_mutex_lock - acquire the w/w mutex + * @lock: the mutex to be acquired + * @ctx: w/w acquire context, or NULL to acquire only a single lock. + * + * Lock the w/w mutex exclusively for this task. + * + * Deadlocks within a given w/w class of locks are detected and handled with the + * wait/wound algorithm. If the lock isn't immediately avaiable this function + * will either sleep until it is (wait case). Or it selects the current context + * for backing off by returning -EDEADLK (wound case). Trying to acquire the + * same lock with the same context twice is also detected and signalled by + * returning -EALREADY. Returns 0 if the mutex was successfully acquired. + * + * In the wound case the caller must release all currently held w/w mutexes for + * the given context and then wait for this contending lock to be available by + * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this + * lock and proceed with trying to acquire further w/w mutexes (e.g. when + * scanning through lru lists trying to free resources). + * + * The mutex must later on be released by the same task that + * acquired it. The task may not exit without first unlocking the mutex. Also, + * kernel memory where the mutex resides must not be freed with the mutex still + * locked. 
The mutex must first be initialized (or statically defined) before it + * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be + * of the same w/w lock class as was used to initialize the acquire context. + * + * A mutex acquired with this function must be released with ww_mutex_unlock. + */ +static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ + if (ctx) + return __ww_mutex_lock(lock, ctx); + + mutex_lock(&lock->base); + return 0; +} + +/** + * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible + * @lock: the mutex to be acquired + * @ctx: w/w acquire context + * + * Lock the w/w mutex exclusively for this task. + * + * Deadlocks within a given w/w class of locks are detected and handled with the + * wait/wound algorithm. If the lock isn't immediately avaiable this function + * will either sleep until it is (wait case). Or it selects the current context + * for backing off by returning -EDEADLK (wound case). Trying to acquire the + * same lock with the same context twice is also detected and signalled by + * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a + * signal arrives while waiting for the lock then this function returns -EINTR. + * + * In the wound case the caller must release all currently held w/w mutexes for + * the given context and then wait for this contending lock to be available by + * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to + * not acquire this lock and proceed with trying to acquire further w/w mutexes + * (e.g. when scanning through lru lists trying to free resources). + * + * The mutex must later on be released by the same task that + * acquired it. The task may not exit without first unlocking the mutex. Also, + * kernel memory where the mutex resides must not be freed with the mutex still + * locked. The mutex must first be initialized (or statically defined) before it + * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be + * of the same w/w lock class as was used to initialize the acquire context. + * + * A mutex acquired with this function must be released with ww_mutex_unlock. + */ +static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx) +{ + if (ctx) + return __ww_mutex_lock_interruptible(lock, ctx); + else + return mutex_lock_interruptible(&lock->base); +} + +/** + * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex + * @lock: the mutex to be acquired + * @ctx: w/w acquire context + * + * Acquires a w/w mutex with the given context after a wound case. This function + * will sleep until the lock becomes available. + * + * The caller must have released all w/w mutexes already acquired with the + * context and then call this function on the contended lock. + * + * Afterwards the caller may continue to (re)acquire the other w/w mutexes it + * needs with ww_mutex_lock. Note that the -EALREADY return code from + * ww_mutex_lock can be used to avoid locking this contended mutex twice. + * + * It is forbidden to call this function with any other w/w mutexes associated + * with the context held. It is forbidden to call this on anything else than the + * contending mutex. + * + * Note that the slowpath lock acquiring can also be done by calling + * ww_mutex_lock directly. This function here is simply to help w/w mutex + * locking code readability by clearly denoting the slowpath. 
+ */ +static inline void +ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ + int ret; +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); +#endif + ret = ww_mutex_lock(lock, ctx); + (void)ret; +} + +/** + * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible + * @lock: the mutex to be acquired + * @ctx: w/w acquire context + * + * Acquires a w/w mutex with the given context after a wound case. This function + * will sleep until the lock becomes available and returns 0 when the lock has + * been acquired. If a signal arrives while waiting for the lock then this + * function returns -EINTR. + * + * The caller must have released all w/w mutexes already acquired with the + * context and then call this function on the contended lock. + * + * Afterwards the caller may continue to (re)acquire the other w/w mutexes it + * needs with ww_mutex_lock. Note that the -EALREADY return code from + * ww_mutex_lock can be used to avoid locking this contended mutex twice. + * + * It is forbidden to call this function with any other w/w mutexes associated + * with the given context held. It is forbidden to call this on anything else + * than the contending mutex. + * + * Note that the slowpath lock acquiring can also be done by calling + * ww_mutex_lock_interruptible directly. This function here is simply to help + * w/w mutex locking code readability by clearly denoting the slowpath. + */ +static inline int __must_check +ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, + struct ww_acquire_ctx *ctx) +{ +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); +#endif + return ww_mutex_lock_interruptible(lock, ctx); +} + +extern void ww_mutex_unlock(struct ww_mutex *lock); + +/** + * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context + * @lock: mutex to lock + * + * Trylocks a mutex without acquire context, so no deadlock detection is + * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. + */ +static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) +{ + return mutex_trylock(&lock->base); +} + +/*** + * ww_mutex_destroy - mark a w/w mutex unusable + * @lock: the mutex to be destroyed + * + * This function marks the mutex uninitialized, and any subsequent + * use of the mutex is forbidden. The mutex must not be locked when + * this function is called. + */ +static inline void ww_mutex_destroy(struct ww_mutex *lock) +{ + mutex_destroy(&lock->base); +} + +/** + * ww_mutex_is_locked - is the w/w mutex locked + * @lock: the mutex to be queried + * + * Returns 1 if the mutex is locked, 0 if unlocked. 
+ */ +static inline bool ww_mutex_is_locked(struct ww_mutex *lock) +{ + return mutex_is_locked(&lock->base); +} + +#endif diff --git a/include/linux/zbud.h b/include/linux/zbud.h new file mode 100644 index 0000000..2571a5c --- /dev/null +++ b/include/linux/zbud.h @@ -0,0 +1,22 @@ +#ifndef _ZBUD_H_ +#define _ZBUD_H_ + +#include <linux/types.h> + +struct zbud_pool; + +struct zbud_ops { + int (*evict)(struct zbud_pool *pool, unsigned long handle); +}; + +struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops); +void zbud_destroy_pool(struct zbud_pool *pool); +int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp, + unsigned long *handle); +void zbud_free(struct zbud_pool *pool, unsigned long handle); +int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries); +void *zbud_map(struct zbud_pool *pool, unsigned long handle); +void zbud_unmap(struct zbud_pool *pool, unsigned long handle); +u64 zbud_get_pool_size(struct zbud_pool *pool); + +#endif /* _ZBUD_H_ */
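
The new <linux/zbud.h> above only declares the allocator's entry points. As a rough sketch of how a caller might exercise them (the demo_* names are invented for illustration; only the zbud_* prototypes come from the header itself), note that zbud_alloc() hands back an opaque handle which must be mapped before the stored data can be touched:

#include <linux/zbud.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Hypothetical eviction callback: a real user would write the object
 * back to its backing store here before zbud reuses the page. */
static int demo_evict(struct zbud_pool *pool, unsigned long handle)
{
        return 0;
}

static struct zbud_ops demo_ops = {
        .evict = demo_evict,
};

static int demo_zbud_roundtrip(const void *src, size_t len)
{
        struct zbud_pool *pool;
        unsigned long handle;
        void *dst;
        int ret;

        pool = zbud_create_pool(GFP_KERNEL, &demo_ops);
        if (!pool)
                return -ENOMEM;

        ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
        if (ret)
                goto out_destroy;

        /* Handles are opaque; map the buddy slot before copying into it. */
        dst = zbud_map(pool, handle);
        memcpy(dst, src, len);
        zbud_unmap(pool, handle);

        pr_info("zbud pool now holds %llu pages\n",
                (unsigned long long)zbud_get_pool_size(pool));

        zbud_free(pool, handle);
out_destroy:
        zbud_destroy_pool(pool);
        return ret;
}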
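The wound/wait mutex kerneldoc in the new <linux/ww_mutex.h> describes the acquire-context protocol only in prose. Below is a minimal sketch of the usual -EDEADLK back-off loop for two locks taken in arbitrary order; the demo_* names are hypothetical, and only DEFINE_WW_CLASS() and the ww_* calls come from the header:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_ww_class);

static void demo_with_both_locked(struct ww_mutex *a, struct ww_mutex *b)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &demo_ww_class);

        ret = ww_mutex_lock(a, &ctx);
        if (ret == -EDEADLK)            /* wounded while holding nothing */
                ww_mutex_lock_slow(a, &ctx);

        for (;;) {
                ret = ww_mutex_lock(b, &ctx);
                if (!ret)
                        break;          /* both locks held */
                /* -EDEADLK: drop 'a', sleep on the contended 'b', retake 'a' */
                ww_mutex_unlock(a);
                ww_mutex_lock_slow(b, &ctx);
                ret = ww_mutex_lock(a, &ctx);
                if (!ret)
                        break;          /* both locks held */
                /* wounded again, this time on 'a': back off once more */
                ww_mutex_unlock(b);
                ww_mutex_lock_slow(a, &ctx);
        }

        ww_acquire_done(&ctx);          /* optional: no further locks follow */

        /* ... touch the data protected by 'a' and 'b' ... */

        ww_mutex_unlock(b);
        ww_mutex_unlock(a);
        ww_acquire_fini(&ctx);
}

A longer, list-based variant of the same back-off pattern is worked through in the kernel's w/w mutex design documentation added by the same series.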
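The sysfs changes add __ATTR_RW(), ATTRIBUTE_GROUPS() and the BIN_ATTR*() family to cut down attribute boilerplate. A small sketch of how a driver could use the read/write helpers follows; every demo_* identifier is invented for illustration, only the macros come from the header:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static int demo_value;

static ssize_t demo_value_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", demo_value);
}

static ssize_t demo_value_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t count)
{
        if (kstrtoint(buf, 0, &demo_value))
                return -EINVAL;
        return count;
}

/* S_IRUGO | S_IWUSR, wired to demo_value_show/demo_value_store */
static struct kobj_attribute demo_value_attr = __ATTR_RW(demo_value);

static struct attribute *demo_attrs[] = {
        &demo_value_attr.attr,
        NULL,
};
ATTRIBUTE_GROUPS(demo);         /* defines demo_group and demo_groups[] */

The generated demo_groups[] array would then be handed to whatever registration interface accepts a list of attribute groups.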