Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/cputime.h | 4
-rw-r--r--  include/asm-generic/fcntl.h | 5
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 1
-rw-r--r--  include/drm/drmP.h | 3
-rw-r--r--  include/drm/drm_crtc.h | 9
-rw-r--r--  include/drm/drm_dp_helper.h | 61
-rw-r--r--  include/drm/drm_edid.h | 9
-rw-r--r--  include/drm/exynos_drm.h | 104
-rw-r--r--  include/drm/intel-gtt.h | 2
-rw-r--r--  include/drm/vmwgfx_drm.h | 358
-rw-r--r--  include/linux/alarmtimer.h | 51
-rw-r--r--  include/linux/bma150.h | 46
-rw-r--r--  include/linux/can.h | 2
-rw-r--r--  include/linux/can/bcm.h | 2
-rw-r--r--  include/linux/can/core.h | 2
-rw-r--r--  include/linux/can/dev.h | 1
-rw-r--r--  include/linux/can/error.h | 2
-rw-r--r--  include/linux/can/gw.h | 2
-rw-r--r--  include/linux/can/netlink.h | 2
-rw-r--r--  include/linux/can/raw.h | 2
-rw-r--r--  include/linux/capability.h | 3
-rw-r--r--  include/linux/ceph/messenger.h | 1
-rw-r--r--  include/linux/clk.h | 43
-rw-r--r--  include/linux/clkdev.h | 7
-rw-r--r--  include/linux/clockchips.h | 12
-rw-r--r--  include/linux/cpu_pm.h | 109
-rw-r--r--  include/linux/devfreq.h | 238
-rw-r--r--  include/linux/device.h | 12
-rw-r--r--  include/linux/dma_remapping.h | 10
-rw-r--r--  include/linux/dmar.h | 43
-rw-r--r--  include/linux/drbd_tag_magic.h | 2
-rw-r--r--  include/linux/dw_apb_timer.h | 2
-rw-r--r--  include/linux/dynamic_debug.h | 76
-rw-r--r--  include/linux/evm.h | 100
-rw-r--r--  include/linux/filter.h | 2
-rw-r--r--  include/linux/freezer.h | 26
-rw-r--r--  include/linux/fs.h | 36
-rw-r--r--  include/linux/hid.h | 3
-rw-r--r--  include/linux/hyperv.h | 873
-rw-r--r--  include/linux/i2c/tsc2007.h | 2
-rw-r--r--  include/linux/if_macvlan.h | 1
-rw-r--r--  include/linux/ima.h | 13
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/input.h | 3
-rw-r--r--  include/linux/input/adp5589.h | 157
-rw-r--r--  include/linux/input/adxl34x.h | 21
-rw-r--r--  include/linux/integrity.h | 39
-rw-r--r--  include/linux/intel-iommu.h | 10
-rw-r--r--  include/linux/interrupt.h | 41
-rw-r--r--  include/linux/io-mapping.h | 6
-rw-r--r--  include/linux/irq.h | 18
-rw-r--r--  include/linux/irq_work.h | 15
-rw-r--r--  include/linux/irqdesc.h | 1
-rw-r--r--  include/linux/isdn.h | 2
-rw-r--r--  include/linux/jiffies.h | 2
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/llist.h | 77
-rw-r--r--  include/linux/lockdep.h | 2
-rw-r--r--  include/linux/mfd/wm831x/core.h | 12
-rw-r--r--  include/linux/mfd/wm831x/pdata.h | 3
-rw-r--r--  include/linux/mfd/wm8400-private.h | 7
-rw-r--r--  include/linux/mfd/wm8994/core.h | 9
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mm_types.h | 25
-rw-r--r--  include/linux/mod_devicetable.h | 9
-rw-r--r--  include/linux/module.h | 12
-rw-r--r--  include/linux/moduleparam.h | 20
-rw-r--r--  include/linux/net_tstamp.h | 4
-rw-r--r--  include/linux/netdevice.h | 14
-rw-r--r--  include/linux/nfs4.h | 21
-rw-r--r--  include/linux/nfs_fs.h | 1
-rw-r--r--  include/linux/nfs_page.h | 1
-rw-r--r--  include/linux/nfs_xdr.h | 5
-rw-r--r--  include/linux/nfsd/Kbuild | 2
-rw-r--r--  include/linux/nfsd/const.h | 55
-rw-r--r--  include/linux/nfsd/export.h | 2
-rw-r--r--  include/linux/nfsd/nfsfh.h | 7
-rw-r--r--  include/linux/nfsd/syscall.h | 116
-rw-r--r--  include/linux/of.h | 41
-rw-r--r--  include/linux/opp.h | 12
-rw-r--r--  include/linux/oprofile.h | 2
-rw-r--r--  include/linux/percpu_counter.h | 2
-rw-r--r--  include/linux/perf_event.h | 5
-rw-r--r--  include/linux/phy.h | 2
-rw-r--r--  include/linux/pinctrl/machine.h | 107
-rw-r--r--  include/linux/pinctrl/pinctrl.h | 133
-rw-r--r--  include/linux/pinctrl/pinmux.h | 117
-rw-r--r--  include/linux/platform_data/dwc3-omap.h | 47
-rw-r--r--  include/linux/platform_data/exynos4_tmu.h | 83
-rw-r--r--  include/linux/platform_data/mv_usb.h | 50
-rw-r--r--  include/linux/platform_data/ntc_thermistor.h | 2
-rw-r--r--  include/linux/platform_device.h | 48
-rw-r--r--  include/linux/pm.h | 26
-rw-r--r--  include/linux/pm_clock.h | 71
-rw-r--r--  include/linux/pm_domain.h | 26
-rw-r--r--  include/linux/pm_qos.h | 155
-rw-r--r--  include/linux/pm_qos_params.h | 38
-rw-r--r--  include/linux/pm_runtime.h | 42
-rw-r--r--  include/linux/posix-timers.h | 5
-rw-r--r--  include/linux/proportions.h | 6
-rw-r--r--  include/linux/random.h | 13
-rw-r--r--  include/linux/ratelimit.h | 6
-rw-r--r--  include/linux/rcupdate.h | 300
-rw-r--r--  include/linux/rcutiny.h | 20
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/linux/regmap.h | 79
-rw-r--r--  include/linux/ring_buffer.h | 2
-rw-r--r--  include/linux/rwsem-spinlock.h | 2
-rw-r--r--  include/linux/rwsem.h | 10
-rw-r--r--  include/linux/sched.h | 18
-rw-r--r--  include/linux/security.h | 32
-rw-r--r--  include/linux/semaphore.h | 4
-rw-r--r--  include/linux/serial.h | 1
-rw-r--r--  include/linux/serial_8250.h | 2
-rw-r--r--  include/linux/serial_core.h | 10
-rw-r--r--  include/linux/serial_reg.h | 1
-rw-r--r--  include/linux/serio.h | 1
-rw-r--r--  include/linux/skbuff.h | 82
-rw-r--r--  include/linux/slub_def.h | 4
-rw-r--r--  include/linux/sunrpc/clnt.h | 11
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h | 2
-rw-r--r--  include/linux/sunrpc/svc.h | 32
-rw-r--r--  include/linux/suspend.h | 95
-rw-r--r--  include/linux/sysfs.h | 1
-rw-r--r--  include/linux/trace_clock.h | 1
-rw-r--r--  include/linux/tracepoint.h | 25
-rw-r--r--  include/linux/tty.h | 26
-rw-r--r--  include/linux/types.h | 10
-rw-r--r--  include/linux/uinput.h | 2
-rw-r--r--  include/linux/uio_driver.h | 7
-rw-r--r--  include/linux/usb.h | 20
-rw-r--r--  include/linux/usb/ch9.h | 29
-rw-r--r--  include/linux/usb/gadget.h | 4
-rw-r--r--  include/linux/usb/hcd.h | 3
-rw-r--r--  include/linux/usb/r8a66597.h | 60
-rw-r--r--  include/linux/usb/renesas_usbhs.h | 14
-rw-r--r--  include/linux/virtio.h | 5
-rw-r--r--  include/linux/xattr.h | 19
-rw-r--r--  include/media/pwc-ioctl.h | 1
-rw-r--r--  include/media/videobuf-dma-sg.h | 2
-rw-r--r--  include/net/9p/9p.h | 14
-rw-r--r--  include/net/9p/client.h | 8
-rw-r--r--  include/net/9p/transport.h | 10
-rw-r--r--  include/net/caif/caif_hsi.h | 37
-rw-r--r--  include/net/inet_ecn.h | 8
-rw-r--r--  include/net/inet_timewait_sock.h | 3
-rw-r--r--  include/net/ip.h | 12
-rw-r--r--  include/net/ip_vs.h | 1
-rw-r--r--  include/net/sch_generic.h | 24
-rw-r--r--  include/net/secure_seq.h | 2
-rw-r--r--  include/net/tcp.h | 63
-rw-r--r--  include/net/udplite.h | 63
-rw-r--r--  include/scsi/osd_ore.h | 80
-rw-r--r--  include/sound/pcm.h | 4
-rw-r--r--  include/target/configfs_macros.h | 4
-rw-r--r--  include/target/target_core_base.h | 71
-rw-r--r--  include/target/target_core_tmr.h | 2
-rw-r--r--  include/target/target_core_transport.h | 75
-rw-r--r--  include/trace/events/9p.h | 176
-rw-r--r--  include/trace/events/rcu.h | 459
-rw-r--r--  include/trace/events/regmap.h | 136
-rw-r--r--  include/trace/events/rpm.h | 99
-rw-r--r--  include/trace/events/sched.h | 9
-rw-r--r--  include/trace/ftrace.h | 3
-rw-r--r--  include/xen/balloon.h | 5
-rw-r--r--  include/xen/grant_table.h | 1
-rw-r--r--  include/xen/interface/io/xs_wire.h | 6
-rw-r--r--  include/xen/interface/physdev.h | 34
-rw-r--r--  include/xen/page.h | 12
170 files changed, 5254 insertions, 1153 deletions
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 61e03dd..62ce682 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -38,8 +38,8 @@ typedef u64 cputime64_t;
/*
* Convert cputime to microseconds and back.
*/
-#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct);
-#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs);
+#define cputime_to_usecs(__ct) jiffies_to_usecs(__ct)
+#define usecs_to_cputime(__msecs) usecs_to_jiffies(__msecs)
/*
* Convert cputime to seconds and back.
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index 84793c70..9e5b035 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -145,11 +145,6 @@ struct f_owner_ex {
#define F_SHLCK 8 /* or 4 */
#endif
-/* for leases */
-#ifndef F_INPROGRESS
-#define F_INPROGRESS 16
-#endif
-
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index db22d13..b5e2e4c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -222,7 +222,6 @@
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
- *(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 9b7c2bb..43538b6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1624,6 +1624,9 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
drm_gem_object_unreference_unlocked(obj);
}
+void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 44335e5..8020798 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -466,6 +466,8 @@ enum drm_connector_force {
/* DACs should rarely do this without a lot of testing */
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
+#define MAX_ELD_BYTES 128
+
/**
* drm_connector - central DRM connector control structure
* @crtc: CRTC this connector is currently connected to, NULL if none
@@ -523,6 +525,13 @@ struct drm_connector {
uint32_t force_encoder_id;
struct drm_encoder *encoder; /* currently active encoder */
+ /* EDID bits */
+ uint8_t eld[MAX_ELD_BYTES];
+ bool dvi_dual;
+ int max_tmds_clock; /* in MHz */
+ bool latency_present[2];
+ int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ int audio_latency[2];
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
};
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 91567bb..0d2f727e 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -74,6 +74,20 @@
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
+#define DP_PSR_SUPPORT 0x070
+# define DP_PSR_IS_SUPPORTED 1
+#define DP_PSR_CAPS 0x071
+# define DP_PSR_NO_TRAIN_ON_EXIT 1
+# define DP_PSR_SETUP_TIME_330 (0 << 1)
+# define DP_PSR_SETUP_TIME_275 (1 << 1)
+# define DP_PSR_SETUP_TIME_220 (2 << 1)
+# define DP_PSR_SETUP_TIME_165 (3 << 1)
+# define DP_PSR_SETUP_TIME_110 (4 << 1)
+# define DP_PSR_SETUP_TIME_55 (5 << 1)
+# define DP_PSR_SETUP_TIME_0 (6 << 1)
+# define DP_PSR_SETUP_TIME_MASK (7 << 1)
+# define DP_PSR_SETUP_TIME_SHIFT 1
+
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
@@ -133,6 +147,18 @@
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
+#define DP_PSR_EN_CFG 0x170
+# define DP_PSR_ENABLE (1 << 0)
+# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
+# define DP_PSR_CRC_VERIFICATION (1 << 2)
+# define DP_PSR_FRAME_CAPTURE (1 << 3)
+
+#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
+# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
+# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
+# define DP_CP_IRQ (1 << 2)
+# define DP_SINK_SPECIFIC_IRQ (1 << 6)
+
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
# define DP_LANE_CR_DONE (1 << 0)
@@ -165,10 +191,45 @@
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+#define DP_TEST_REQUEST 0x218
+# define DP_TEST_LINK_TRAINING (1 << 0)
+# define DP_TEST_LINK_PATTERN (1 << 1)
+# define DP_TEST_LINK_EDID_READ (1 << 2)
+# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */
+
+#define DP_TEST_LINK_RATE 0x219
+# define DP_LINK_RATE_162 (0x6)
+# define DP_LINK_RATE_27 (0xa)
+
+#define DP_TEST_LANE_COUNT 0x220
+
+#define DP_TEST_PATTERN 0x221
+
+#define DP_TEST_RESPONSE 0x260
+# define DP_TEST_ACK (1 << 0)
+# define DP_TEST_NAK (1 << 1)
+# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
+
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
+#define DP_PSR_ERROR_STATUS 0x2006
+# define DP_PSR_LINK_CRC_ERROR (1 << 0)
+# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
+
+#define DP_PSR_ESI 0x2007
+# define DP_PSR_CAPS_CHANGE (1 << 0)
+
+#define DP_PSR_STATUS 0x2008
+# define DP_PSR_SINK_INACTIVE 0
+# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
+# define DP_PSR_SINK_ACTIVE_RFB 2
+# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3
+# define DP_PSR_SINK_ACTIVE_RESYNC 4
+# define DP_PSR_SINK_INTERNAL_ERROR 7
+# define DP_PSR_SINK_STATE_MASK 0x07
+
#define MODE_I2C_START 1
#define MODE_I2C_WRITE 2
#define MODE_I2C_READ 4
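
The DPCD additions above are plain register offsets and bit masks. As a usage sketch (not part of the patch; the helper names are made up), the setup-time field of DP_PSR_CAPS decodes through the new shift/mask, and the value written to DP_PSR_EN_CFG is composed by OR-ing the flag bits:

#include <linux/kernel.h>
#include <drm/drm_dp_helper.h>

/* Illustrative only: decode the PSR setup time (in microseconds) advertised
 * in the DP_PSR_CAPS DPCD byte, using the new shift/mask defines. */
static unsigned int example_psr_setup_time_us(u8 psr_caps)
{
	static const unsigned int us[] = { 330, 275, 220, 165, 110, 55, 0 };
	unsigned int idx = (psr_caps & DP_PSR_SETUP_TIME_MASK) >>
			   DP_PSR_SETUP_TIME_SHIFT;

	return idx < ARRAY_SIZE(us) ? us[idx] : 0;
}

/* Illustrative only: compose the byte a source would write to DP_PSR_EN_CFG. */
static u8 example_psr_enable_value(bool crc_check)
{
	u8 val = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE;

	if (crc_check)
		val |= DP_PSR_CRC_VERIFICATION;
	return val;
}
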
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index eacb415..74ce916 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -230,4 +230,13 @@ struct edid {
#define EDID_PRODUCT_ID(e) ((e)->prod_code[0] | ((e)->prod_code[1] << 8))
+struct drm_encoder;
+struct drm_connector;
+struct drm_display_mode;
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
+int drm_av_sync_delay(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+ struct drm_display_mode *mode);
+
#endif /* __DRM_EDID_H__ */
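
A minimal sketch of how a driver might use the new ELD helpers declared above during mode set; the wrapper function, its arguments, and the assumption that the caller already holds the sink's EDID are all illustrative:

#include <drm/drm_edid.h>

/* Hypothetical mode-set hook: cache the sink's ELD into the connector and
 * return the audio/video latency correction derived from it. */
static int example_cache_eld(struct drm_connector *connector,
			     struct edid *edid,
			     struct drm_display_mode *mode)
{
	drm_edid_to_eld(connector, edid);		/* fills connector->eld[] */
	return drm_av_sync_delay(connector, mode);	/* from the ELD latency fields */
}
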
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
new file mode 100644
index 0000000..874c4d2
--- /dev/null
+++ b/include/drm/exynos_drm.h
@@ -0,0 +1,104 @@
+/* exynos_drm.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_H_
+#define _EXYNOS_DRM_H_
+
+/**
+ * User-desired buffer creation information structure.
+ *
+ * @size: requested size for the object.
+ * - this size value would be page-aligned internally.
+ * @flags: user request for setting memory type or cache attributes.
+ * @handle: returned handle for the object.
+ */
+struct drm_exynos_gem_create {
+ unsigned int size;
+ unsigned int flags;
+ unsigned int handle;
+};
+
+/**
+ * A structure for getting buffer offset.
+ *
+ * @handle: a pointer to gem object created.
+ * @pad: just padding to be 64-bit aligned.
+ * @offset: relatived offset value of the memory region allocated.
+ * - this value should be set by user.
+ */
+struct drm_exynos_gem_map_off {
+ unsigned int handle;
+ unsigned int pad;
+ uint64_t offset;
+};
+
+/**
+ * A structure for mapping buffer.
+ *
+ * @handle: a handle to gem object created.
+ * @size: memory size to be mapped.
+ * @mapped: having user virtual address mmaped.
+ * - this variable would be filled by exynos gem module
+ * of kernel side with user virtual address which is allocated
+ * by do_mmap().
+ */
+struct drm_exynos_gem_mmap {
+ unsigned int handle;
+ unsigned int size;
+ uint64_t mapped;
+};
+
+#define DRM_EXYNOS_GEM_CREATE 0x00
+#define DRM_EXYNOS_GEM_MAP_OFFSET 0x01
+#define DRM_EXYNOS_GEM_MMAP 0x02
+
+#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
+
+#define DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_MAP_OFFSET, struct drm_exynos_gem_map_off)
+
+#define DRM_IOCTL_EXYNOS_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_MMAP, struct drm_exynos_gem_mmap)
+
+/**
+ * Platform Specific Structure for DRM based FIMD.
+ *
+ * @timing: default video mode for initializing
+ * @default_win: default window layer number to be used for UI.
+ * @bpp: default bit per pixel.
+ */
+struct exynos_drm_fimd_pdata {
+ struct fb_videomode timing;
+ u32 vidcon0;
+ u32 vidcon1;
+ unsigned int default_win;
+ unsigned int bpp;
+};
+
+#endif
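
A user-space sketch of driving the new EXYNOS GEM ioctls defined above (the DRM file descriptor is assumed to be open already; the flags value 0 and the error handling are illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* DRM_IOWR(), DRM_COMMAND_BASE */
#include <drm/exynos_drm.h>

/* Allocate a GEM object of the given size, then ask for its mmap offset. */
static int example_exynos_alloc(int fd, unsigned int size, uint64_t *offset)
{
	struct drm_exynos_gem_create create = { .size = size, .flags = 0 };
	struct drm_exynos_gem_map_off map = { 0 };

	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &create) < 0)
		return -1;

	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP_OFFSET, &map) < 0)
		return -1;

	*offset = map.offset;	/* usable with mmap() on the DRM fd */
	return 0;
}
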
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 9e343c0..b174620 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -13,6 +13,8 @@ const struct intel_gtt {
unsigned int gtt_mappable_entries;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
+ /* Whether we idle the gpu before mapping/unmapping */
+ unsigned int do_idle_maps : 1;
} *intel_gtt_get(void);
void intel_gtt_chipset_flush(void);
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 5c36432..cd7cd81 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -31,7 +31,6 @@
#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24
-#define DRM_VMW_EXT_NAME_LEN 128
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
@@ -48,10 +47,13 @@
#define DRM_VMW_UNREF_SURFACE 10
#define DRM_VMW_REF_SURFACE 11
#define DRM_VMW_EXECBUF 12
-#define DRM_VMW_FIFO_DEBUG 13
+#define DRM_VMW_GET_3D_CAP 13
#define DRM_VMW_FENCE_WAIT 14
-/* guarded by minor version >= 2 */
-#define DRM_VMW_UPDATE_LAYOUT 15
+#define DRM_VMW_FENCE_SIGNALED 15
+#define DRM_VMW_FENCE_UNREF 16
+#define DRM_VMW_FENCE_EVENT 17
+#define DRM_VMW_PRESENT 18
+#define DRM_VMW_PRESENT_READBACK 19
/*************************************************************************/
@@ -69,10 +71,10 @@
#define DRM_VMW_PARAM_NUM_STREAMS 0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
-#define DRM_VMW_PARAM_FIFO_OFFSET 3
-#define DRM_VMW_PARAM_HW_CAPS 4
-#define DRM_VMW_PARAM_FIFO_CAPS 5
-#define DRM_VMW_PARAM_MAX_FB_SIZE 6
+#define DRM_VMW_PARAM_HW_CAPS 3
+#define DRM_VMW_PARAM_FIFO_CAPS 4
+#define DRM_VMW_PARAM_MAX_FB_SIZE 5
+#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
/**
* struct drm_vmw_getparam_arg
@@ -91,49 +93,6 @@ struct drm_vmw_getparam_arg {
/*************************************************************************/
/**
- * DRM_VMW_EXTENSION - Query device extensions.
- */
-
-/**
- * struct drm_vmw_extension_rep
- *
- * @exists: The queried extension exists.
- * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
- * @driver_sarea_offset: Offset to any space in the DRI SAREA
- * used by the extension.
- * @major: Major version number of the extension.
- * @minor: Minor version number of the extension.
- * @pl: Patch level version number of the extension.
- *
- * Output argument to the DRM_VMW_EXTENSION Ioctl.
- */
-
-struct drm_vmw_extension_rep {
- int32_t exists;
- uint32_t driver_ioctl_offset;
- uint32_t driver_sarea_offset;
- uint32_t major;
- uint32_t minor;
- uint32_t pl;
- uint32_t pad64;
-};
-
-/**
- * union drm_vmw_extension_arg
- *
- * @extension - Ascii name of the extension to be queried. //In
- * @rep - Reply as defined above. //Out
- *
- * Argument to the DRM_VMW_EXTENSION Ioctl.
- */
-
-union drm_vmw_extension_arg {
- char extension[DRM_VMW_EXT_NAME_LEN];
- struct drm_vmw_extension_rep rep;
-};
-
-/*************************************************************************/
-/**
* DRM_VMW_CREATE_CONTEXT - Create a host context.
*
* Allocates a device unique context id, and queues a create context command
@@ -292,7 +251,7 @@ union drm_vmw_surface_reference_arg {
* DRM_VMW_EXECBUF
*
* Submit a command buffer for execution on the host, and return a
- * fence sequence that when signaled, indicates that the command buffer has
+ * fence seqno that when signaled, indicates that the command buffer has
* executed.
*/
@@ -314,21 +273,30 @@ union drm_vmw_surface_reference_arg {
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
-#define DRM_VMW_EXECBUF_VERSION 0
+#define DRM_VMW_EXECBUF_VERSION 1
struct drm_vmw_execbuf_arg {
uint64_t commands;
uint32_t command_size;
uint32_t throttle_us;
uint64_t fence_rep;
- uint32_t version;
- uint32_t flags;
+ uint32_t version;
+ uint32_t flags;
};
/**
* struct drm_vmw_fence_rep
*
- * @fence_seq: Fence sequence associated with a command submission.
+ * @handle: Fence object handle for fence associated with a command submission.
+ * @mask: Fence flags relevant for this fence object.
+ * @seqno: Fence sequence number in fifo. A fence object with a lower
+ * seqno will signal the EXEC flag before a fence object with a higher
+ * seqno. This can be used by user-space to avoid kernel calls to determine
+ * whether a fence has signaled the EXEC flag. Note that @seqno will
+ * wrap at 32-bit.
+ * @passed_seqno: The highest seqno number processed by the hardware
+ * so far. This can be used to mark user-space fence objects as signaled, and
+ * to determine whether a fence seqno might be stale.
* @error: This member should've been set to -EFAULT on submission.
* The following actions should be take on completion:
* error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -342,9 +310,12 @@ struct drm_vmw_execbuf_arg {
*/
struct drm_vmw_fence_rep {
- uint64_t fence_seq;
- int32_t error;
+ uint32_t handle;
+ uint32_t mask;
+ uint32_t seqno;
+ uint32_t passed_seqno;
uint32_t pad64;
+ int32_t error;
};
/*************************************************************************/
@@ -435,39 +406,6 @@ struct drm_vmw_unref_dmabuf_arg {
/*************************************************************************/
/**
- * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
- *
- * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
- */
-
-/**
- * struct drm_vmw_fifo_debug_arg
- *
- * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
- * @debug_buffer_size: Size in bytes of debug buffer //In
- * @used_size: Number of bytes copied to the buffer // Out
- * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
- *
- * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
- */
-
-struct drm_vmw_fifo_debug_arg {
- uint64_t debug_buffer;
- uint32_t debug_buffer_size;
- uint32_t used_size;
- int32_t did_not_fit;
- uint32_t pad64;
-};
-
-struct drm_vmw_fence_wait_arg {
- uint64_t sequence;
- uint64_t kernel_cookie;
- int32_t cookie_valid;
- int32_t pad64;
-};
-
-/*************************************************************************/
-/**
* DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
*
* This IOCTL controls the overlay units of the svga device.
@@ -590,6 +528,30 @@ struct drm_vmw_stream_arg {
/*************************************************************************/
/**
+ * DRM_VMW_GET_3D_CAP
+ *
+ * Read 3D capabilities from the FIFO
+ *
+ */
+
+/**
+ * struct drm_vmw_get_3d_cap_arg
+ *
+ * @buffer: Pointer to a buffer for capability data, cast to an uint64_t
+ * @size: Max size to copy
+ *
+ * Input argument to the DRM_VMW_GET_3D_CAP_IOCTL
+ * ioctls.
+ */
+
+struct drm_vmw_get_3d_cap_arg {
+ uint64_t buffer;
+ uint32_t max_size;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
* DRM_VMW_UPDATE_LAYOUT - Update layout
*
* Updates the preferred modes and connection status for connectors. The
@@ -612,4 +574,218 @@ struct drm_vmw_update_layout_arg {
uint64_t rects;
};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_WAIT
+ *
+ * Waits for a fence object to signal. The wait is interruptible, so that
+ * signals may be delivered during the interrupt. The wait may timeout,
+ * in which case the calls returns -EBUSY. If the wait is restarted,
+ * that is restarting without resetting @cookie_valid to zero,
+ * the timeout is computed from the first call.
+ *
+ * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
+ * on:
+ * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
+ * stream
+ * have executed.
+ * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
+ * commands
+ * in the buffer given to the EXECBUF ioctl returning the fence object handle
+ * are available to user-space.
+ *
+ * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
+ * fenc wait ioctl returns 0, the fence object has been unreferenced after
+ * the wait.
+ */
+
+#define DRM_VMW_FENCE_FLAG_EXEC (1 << 0)
+#define DRM_VMW_FENCE_FLAG_QUERY (1 << 1)
+
+#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
+
+/**
+ * struct drm_vmw_fence_wait_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
+ * @kernel_cookie: Set to 0 on first call. Left alone on restart.
+ * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
+ * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
+ * before returning.
+ * @flags: Fence flags to wait on.
+ * @wait_options: Options that control the behaviour of the wait ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
+ */
+
+struct drm_vmw_fence_wait_arg {
+ uint32_t handle;
+ int32_t cookie_valid;
+ uint64_t kernel_cookie;
+ uint64_t timeout_us;
+ int32_t lazy;
+ int32_t flags;
+ int32_t wait_options;
+ int32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_SIGNALED
+ *
+ * Checks if a fence object is signaled..
+ */
+
+/**
+ * struct drm_vmw_fence_signaled_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
+ * @signaled: Out: Flags signaled.
+ * @sequence: Out: Highest sequence passed so far. Can be used to signal the
+ * EXEC flag of user-space fence objects.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
+ * ioctls.
+ */
+
+struct drm_vmw_fence_signaled_arg {
+ uint32_t handle;
+ uint32_t flags;
+ int32_t signaled;
+ uint32_t passed_seqno;
+ uint32_t signaled_flags;
+ uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_UNREF
+ *
+ * Unreferences a fence object, and causes it to be destroyed if there are no
+ * other references to it.
+ *
+ */
+
+/**
+ * struct drm_vmw_fence_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl..
+ */
+
+struct drm_vmw_fence_arg {
+ uint32_t handle;
+ uint32_t pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_EVENT
+ *
+ * Queues an event on a fence to be delivered on the drm character device
+ * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
+ * Optionally the approximate time when the fence signaled is
+ * given by the event.
+ */
+
+/*
+ * The event type
+ */
+#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000
+
+struct drm_vmw_event_fence {
+ struct drm_event base;
+ uint64_t user_data;
+ uint32_t tv_sec;
+ uint32_t tv_usec;
+};
+
+/*
+ * Flags that may be given to the command.
+ */
+/* Request fence signaled time on the event. */
+#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)
+
+/**
+ * struct drm_vmw_fence_event_arg
+ *
+ * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
+ * the fence is not supposed to be referenced by user-space.
+ * @user_info: Info to be delivered with the event.
+ * @handle: Attach the event to this fence only.
+ * @flags: A set of flags as defined above.
+ */
+struct drm_vmw_fence_event_arg {
+ uint64_t fence_rep;
+ uint64_t user_data;
+ uint32_t handle;
+ uint32_t flags;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT
+ *
+ * Executes an SVGA present on a given fb for a given surface. The surface
+ * is placed on the framebuffer. Cliprects are given relative to the given
+ * point (the point disignated by dest_{x|y}).
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: framebuffer id to present / read back from.
+ * @sid: Surface id to present from.
+ * @dest_x: X placement coordinate for surface.
+ * @dest_y: Y placement coordinate for surface.
+ * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
+ * @num_clips: Number of cliprects given relative to the framebuffer origin,
+ * in the same coordinate space as the frame buffer.
+ * @pad64: Unused 64-bit padding.
+ *
+ * Input argument to the DRM_VMW_PRESENT ioctl.
+ */
+
+struct drm_vmw_present_arg {
+ uint32_t fb_id;
+ uint32_t sid;
+ int32_t dest_x;
+ int32_t dest_y;
+ uint64_t clips_ptr;
+ uint32_t num_clips;
+ uint32_t pad64;
+};
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT_READBACK
+ *
+ * Executes an SVGA present readback from a given fb to the dma buffer
+ * currently bound as the fb. If there is no dma buffer bound to the fb,
+ * an error will be returned.
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: fb_id to present / read back from.
+ * @num_clips: Number of cliprects.
+ * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
+ * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
+ * If this member is NULL, then the ioctl should not return a fence.
+ */
+
+struct drm_vmw_present_readback_arg {
+ uint32_t fb_id;
+ uint32_t num_clips;
+ uint64_t clips_ptr;
+ uint64_t fence_rep;
+};
#endif
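
For reference, a user-space sketch of the reworked fence-wait interface described above. The handle comes from the fence_rep of a prior DRM_VMW_EXECBUF call; the DRM_IOWR() encoding below is an assumption, since the actual ioctl wrapper macro lives outside this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/vmwgfx_drm.h>

/* Wait (interruptibly, no timeout) for the EXEC flag of a fence object and
 * let the kernel drop the fence reference once the wait succeeds. */
static int example_vmw_fence_wait(int fd, uint32_t fence_handle)
{
	struct drm_vmw_fence_wait_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = fence_handle;
	arg.cookie_valid = 0;			/* must be 0 on the first call */
	arg.timeout_us = 0;			/* 0 == indefinite timeout */
	arg.lazy = 1;				/* timing is not critical */
	arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
	arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF;

	return ioctl(fd, DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,
				  struct drm_vmw_fence_wait_arg), &arg);
}
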
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index c5d6095..975009e 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -13,6 +13,16 @@ enum alarmtimer_type {
ALARM_NUMTYPE,
};
+enum alarmtimer_restart {
+ ALARMTIMER_NORESTART,
+ ALARMTIMER_RESTART,
+};
+
+
+#define ALARMTIMER_STATE_INACTIVE 0x00
+#define ALARMTIMER_STATE_ENQUEUED 0x01
+#define ALARMTIMER_STATE_CALLBACK 0x02
+
/**
* struct alarm - Alarm timer structure
* @node: timerqueue node for adding to the event list this value
@@ -25,16 +35,45 @@ enum alarmtimer_type {
*/
struct alarm {
struct timerqueue_node node;
- ktime_t period;
- void (*function)(struct alarm *);
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
enum alarmtimer_type type;
- bool enabled;
+ int state;
void *data;
};
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
- void (*function)(struct alarm *));
-void alarm_start(struct alarm *alarm, ktime_t start, ktime_t period);
-void alarm_cancel(struct alarm *alarm);
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+void alarm_start(struct alarm *alarm, ktime_t start);
+int alarm_try_to_cancel(struct alarm *alarm);
+int alarm_cancel(struct alarm *alarm);
+
+u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
+
+/*
+ * A alarmtimer is active, when it is enqueued into timerqueue or the
+ * callback function is running.
+ */
+static inline int alarmtimer_active(const struct alarm *timer)
+{
+ return timer->state != ALARMTIMER_STATE_INACTIVE;
+}
+
+/*
+ * Helper function to check, whether the timer is on one of the queues
+ */
+static inline int alarmtimer_is_queued(struct alarm *timer)
+{
+ return timer->state & ALARMTIMER_STATE_ENQUEUED;
+}
+
+/*
+ * Helper function to check, whether the timer is running the callback
+ * function
+ */
+static inline int alarmtimer_callback_running(struct alarm *timer)
+{
+ return timer->state & ALARMTIMER_STATE_CALLBACK;
+}
+
#endif
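
The alarmtimer hunk above replaces the period-based callback with one that returns an enum alarmtimer_restart and re-arms itself via alarm_forward(). A driver-side sketch (the ALARM_REALTIME type, the one-second interval, and the function names are illustrative):

#include <linux/alarmtimer.h>
#include <linux/hrtimer.h>	/* ktime_get_real() */
#include <linux/ktime.h>

static struct alarm example_alarm;

/* Fires on the RTC-backed realtime clock; pushes its own expiry one second
 * forward and asks to be restarted. */
static enum alarmtimer_restart example_alarm_fn(struct alarm *alarm, ktime_t now)
{
	/* ... do the periodic work ... */
	alarm_forward(alarm, now, ktime_set(1, 0));
	return ALARMTIMER_RESTART;
}

static void example_alarm_setup(void)
{
	alarm_init(&example_alarm, ALARM_REALTIME, example_alarm_fn);
	alarm_start(&example_alarm, ktime_add(ktime_get_real(), ktime_set(1, 0)));
}
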
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
new file mode 100644
index 0000000..7911fda
--- /dev/null
+++ b/include/linux/bma150.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 Bosch Sensortec GmbH
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _BMA150_H_
+#define _BMA150_H_
+
+#define BMA150_DRIVER "bma150"
+
+struct bma150_cfg {
+ bool any_motion_int; /* Set to enable any-motion interrupt */
+ bool hg_int; /* Set to enable high-G interrupt */
+ bool lg_int; /* Set to enable low-G interrupt */
+ unsigned char any_motion_dur; /* Any-motion duration */
+ unsigned char any_motion_thres; /* Any-motion threshold */
+ unsigned char hg_hyst; /* High-G hysterisis */
+ unsigned char hg_dur; /* High-G duration */
+ unsigned char hg_thres; /* High-G threshold */
+ unsigned char lg_hyst; /* Low-G hysterisis */
+ unsigned char lg_dur; /* Low-G duration */
+ unsigned char lg_thres; /* Low-G threshold */
+ unsigned char range; /* BMA0150_RANGE_xxx (in G) */
+ unsigned char bandwidth; /* BMA0150_BW_xxx (in Hz) */
+};
+
+struct bma150_platform_data {
+ struct bma150_cfg cfg;
+ int (*irq_gpio_cfg)(void);
+};
+
+#endif /* _BMA150_H_ */
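
A board-file sketch of the new platform data (all values, the 0x38 I2C address, and the device names are illustrative, not recommendations):

#include <linux/bma150.h>
#include <linux/i2c.h>
#include <linux/init.h>

static struct bma150_platform_data example_bma150_pdata = {
	.cfg = {
		.any_motion_int   = true,	/* wake on any-motion only */
		.any_motion_dur   = 0,
		.any_motion_thres = 0,
		.range            = 0,		/* a BMA0150_RANGE_xxx value */
		.bandwidth        = 0,		/* a BMA0150_BW_xxx value */
	},
	.irq_gpio_cfg = NULL,			/* optional GPIO setup hook */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO(BMA150_DRIVER, 0x38),
		.platform_data = &example_bma150_pdata,
	},
};
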
diff --git a/include/linux/can.h b/include/linux/can.h
index bb047dc..9a19bcb 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -8,8 +8,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_H
diff --git a/include/linux/can/bcm.h b/include/linux/can/bcm.h
index e96154d..3ebe387 100644
--- a/include/linux/can/bcm.h
+++ b/include/linux/can/bcm.h
@@ -7,8 +7,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_BCM_H
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 5ce6b5d..0ccc1cd 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -8,8 +8,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_CORE_H
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index cc0bb49..a0969fcb 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -8,7 +8,6 @@
*
* Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com>
*
- * Send feedback to <socketcan-users@lists.berlios.de>
*/
#ifndef CAN_DEV_H
diff --git a/include/linux/can/error.h b/include/linux/can/error.h
index 5958074..63e855e 100644
--- a/include/linux/can/error.h
+++ b/include/linux/can/error.h
@@ -7,8 +7,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_ERROR_H
diff --git a/include/linux/can/gw.h b/include/linux/can/gw.h
index 5527b54..8e1db18 100644
--- a/include/linux/can/gw.h
+++ b/include/linux/can/gw.h
@@ -7,8 +7,6 @@
* Copyright (c) 2011 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_GW_H
diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h
index 34542d3..14966dd 100644
--- a/include/linux/can/netlink.h
+++ b/include/linux/can/netlink.h
@@ -5,8 +5,6 @@
*
* Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_NETLINK_H
diff --git a/include/linux/can/raw.h b/include/linux/can/raw.h
index b2a0f87..781f3a3 100644
--- a/include/linux/can/raw.h
+++ b/include/linux/can/raw.h
@@ -8,8 +8,6 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef CAN_RAW_H
diff --git a/include/linux/capability.h b/include/linux/capability.h
index c421123..a63d13d 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -198,7 +198,7 @@ struct cpu_vfs_cap_data {
/* Allow modification of routing tables */
/* Allow setting arbitrary process / process group ownership on
sockets */
-/* Allow binding to any address for transparent proxying */
+/* Allow binding to any address for transparent proxying (also via NET_RAW) */
/* Allow setting TOS (type of service) */
/* Allow setting promiscuous mode */
/* Allow clearing driver statistics */
@@ -210,6 +210,7 @@ struct cpu_vfs_cap_data {
/* Allow use of RAW sockets */
/* Allow use of PACKET sockets */
+/* Allow binding to any address for transparent proxying (also via NET_ADMIN) */
#define CAP_NET_RAW 13
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index d7adf15..ca768ae 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -6,7 +6,6 @@
#include <linux/net.h>
#include <linux/radix-tree.h>
#include <linux/uio.h>
-#include <linux/version.h>
#include <linux/workqueue.h>
#include "types.h"
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 1d37f42..7213b52 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -11,6 +11,8 @@
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H
+#include <linux/kernel.h>
+
struct device;
/*
@@ -41,11 +43,31 @@ struct clk;
struct clk *clk_get(struct device *dev, const char *id);
/**
+ * clk_prepare - prepare a clock source
+ * @clk: clock source
+ *
+ * This prepares the clock source for use.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+int clk_prepare(struct clk *clk);
+#else
+static inline int clk_prepare(struct clk *clk)
+{
+ might_sleep();
+ return 0;
+}
+#endif
+
+/**
* clk_enable - inform the system when the clock source should be running.
* @clk: clock source
*
* If the clock can not be enabled/disabled, this should return success.
*
+ * May be called from atomic contexts.
+ *
* Returns success (0) or negative errno.
*/
int clk_enable(struct clk *clk);
@@ -57,6 +79,8 @@ int clk_enable(struct clk *clk);
* Inform the system that a clock source is no longer required by
* a driver and may be shut down.
*
+ * May be called from atomic contexts.
+ *
* Implementation detail: if the clock source is shared between
* multiple drivers, clk_enable() calls must be balanced by the
* same number of clk_disable() calls for the clock source to be
@@ -64,6 +88,25 @@ int clk_enable(struct clk *clk);
*/
void clk_disable(struct clk *clk);
+
+/**
+ * clk_unprepare - undo preparation of a clock source
+ * @clk: clock source
+ *
+ * This undoes a previously prepared clock. The caller must balance
+ * the number of prepare and unprepare calls.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+void clk_unprepare(struct clk *clk);
+#else
+static inline void clk_unprepare(struct clk *clk)
+{
+ might_sleep();
+}
+#endif
+
/**
* clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
* This is only valid once the clock source has been enabled.
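
The clk.h hunk above adds a prepare/unprepare stage that may sleep, distinct from the atomic-safe enable/disable pair. A consumer sketch (hypothetical device, NULL connection id, simple error unwinding):

#include <linux/clk.h>
#include <linux/err.h>

/* Bring a clock up from process context: prepare (may sleep), then enable
 * (atomic-safe).  Tear-down would be the mirror image. */
static int example_clk_on(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare(clk);		/* must not be called in atomic context */
	if (ret)
		goto err_put;

	ret = clk_enable(clk);		/* may be called from atomic context */
	if (ret)
		goto err_unprepare;

	*out = clk;
	return 0;

err_unprepare:
	clk_unprepare(clk);
err_put:
	clk_put(clk);
	return ret;
}
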
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
index 457bcb0..d9a4fd0 100644
--- a/include/linux/clkdev.h
+++ b/include/linux/clkdev.h
@@ -24,6 +24,13 @@ struct clk_lookup {
struct clk *clk;
};
+#define CLKDEV_INIT(d, n, c) \
+ { \
+ .dev_id = d, \
+ .con_id = n, \
+ .clk = c, \
+ }
+
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
const char *dev_fmt, ...);
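
A platform-code sketch using the new CLKDEV_INIT() initializer above; the clock objects and device/connection names are hypothetical, and such a table would typically be handed to clkdev_add_table() at init time:

#include <linux/clkdev.h>

extern struct clk example_uart_clk;	/* provided by the platform clock code */
extern struct clk example_bus_clk;

static struct clk_lookup example_clk_lookups[] = {
	/* match by device instance name, any connection id */
	CLKDEV_INIT("example-uart.0", NULL, &example_uart_clk),
	/* match by connection id on any device */
	CLKDEV_INIT(NULL, "example_bus", &example_bus_clk),
};
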
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index d6733e2..81e803e 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -45,20 +45,22 @@ enum clock_event_nofitiers {
*/
#define CLOCK_EVT_FEAT_PERIODIC 0x000001
#define CLOCK_EVT_FEAT_ONESHOT 0x000002
+#define CLOCK_EVT_FEAT_KTIME 0x000004
/*
* x86(64) specific misfeatures:
*
* - Clockevent source stops in C3 State and needs broadcast support.
* - Local APIC timer is used as a dummy device.
*/
-#define CLOCK_EVT_FEAT_C3STOP 0x000004
-#define CLOCK_EVT_FEAT_DUMMY 0x000008
+#define CLOCK_EVT_FEAT_C3STOP 0x000008
+#define CLOCK_EVT_FEAT_DUMMY 0x000010
/**
* struct clock_event_device - clock event device descriptor
* @event_handler: Assigned by the framework to be called by the low
* level handler of the event source
- * @set_next_event: set next event function
+ * @set_next_event: set next event function using a clocksource delta
+ * @set_next_ktime: set next event function using a direct ktime value
* @next_event: local storage for the next event in oneshot mode
* @max_delta_ns: maximum delta value in ns
* @min_delta_ns: minimum delta value in ns
@@ -81,6 +83,8 @@ struct clock_event_device {
void (*event_handler)(struct clock_event_device *);
int (*set_next_event)(unsigned long evt,
struct clock_event_device *);
+ int (*set_next_ktime)(ktime_t expires,
+ struct clock_event_device *);
ktime_t next_event;
u64 max_delta_ns;
u64 min_delta_ns;
@@ -140,7 +144,7 @@ extern void clockevents_set_mode(struct clock_event_device *dev,
enum clock_event_mode mode);
extern int clockevents_register_notifier(struct notifier_block *nb);
extern int clockevents_program_event(struct clock_event_device *dev,
- ktime_t expires, ktime_t now);
+ ktime_t expires, bool force);
extern void clockevents_handle_noop(struct clock_event_device *dev);
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
new file mode 100644
index 0000000..455b233
--- /dev/null
+++ b/include/linux/cpu_pm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpm_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes: CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+ /* A single cpu is entering a low power state */
+ CPU_PM_ENTER,
+
+ /* A single cpu failed to enter a low power state */
+ CPU_PM_ENTER_FAILED,
+
+ /* A single cpu is exiting a low power state */
+ CPU_PM_EXIT,
+
+ /* A cpu power domain is entering a low power state */
+ CPU_CLUSTER_PM_ENTER,
+
+ /* A cpu power domain failed to enter a low power state */
+ CPU_CLUSTER_PM_ENTER_FAILED,
+
+ /* A cpu power domain is exiting a low power state */
+ CPU_CLUSTER_PM_EXIT,
+};
+
+#ifdef CONFIG_CPU_PM
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+int cpu_pm_enter(void);
+int cpu_pm_exit(void);
+int cpu_cluster_pm_enter(void);
+int cpu_cluster_pm_exit(void);
+
+#else
+
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_pm_exit(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_exit(void)
+{
+ return 0;
+}
+#endif
+#endif
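
A driver-side sketch of the new cpu_pm notifier interface above: a hypothetical per-CPU block saves its context when its CPU enters a low power state and restores it on exit (or on a failed entry). All names are illustrative:

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Called with interrupts disabled, on the affected CPU. */
static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-cpu context of the block */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-cpu context of the block */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

static int __init example_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}
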
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
new file mode 100644
index 0000000..afb9458
--- /dev/null
+++ b/include/linux/devfreq.h
@@ -0,0 +1,238 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ * for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_H__
+#define __LINUX_DEVFREQ_H__
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/opp.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+struct devfreq;
+
+/**
+ * struct devfreq_dev_status - Data given from devfreq user device to
+ * governors. Represents the performance
+ * statistics.
+ * @total_time The total time represented by this instance of
+ * devfreq_dev_status
+ * @busy_time The time that the device was working among the
+ * total_time.
+ * @current_frequency The operating frequency.
+ * @private_data An entry not specified by the devfreq framework.
+ * A device and a specific governor may have their
+ * own protocol with private_data. However, because
+ * this is governor-specific, a governor using this
+ * will be only compatible with devices aware of it.
+ */
+struct devfreq_dev_status {
+ /* both since the last measure */
+ unsigned long total_time;
+ unsigned long busy_time;
+ unsigned long current_frequency;
+ void *private_date;
+};
+
+/**
+ * struct devfreq_dev_profile - Devfreq's user device profile
+ * @initial_freq The operating frequency when devfreq_add_device() is
+ * called.
+ * @polling_ms The polling interval in ms. 0 disables polling.
+ * @target The device should set its operating frequency at
+ * freq or lowest-upper-than-freq value. If freq is
+ * higher than any operable frequency, set maximum.
+ * Before returning, target function should set
+ * freq at the current frequency.
+ * @get_dev_status The device should provide the current performance
+ * status to devfreq, which is used by governors.
+ * @exit An optional callback that is called when devfreq
+ * is removing the devfreq object due to error or
+ * from devfreq_remove_device() call. If the user
+ * has registered devfreq->nb at a notifier-head,
+ * this is the time to unregister it.
+ */
+struct devfreq_dev_profile {
+ unsigned long initial_freq;
+ unsigned int polling_ms;
+
+ int (*target)(struct device *dev, unsigned long *freq);
+ int (*get_dev_status)(struct device *dev,
+ struct devfreq_dev_status *stat);
+ void (*exit)(struct device *dev);
+};
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @name Governor's name
+ * @get_target_freq Returns desired operating frequency for the device.
+ * Basically, get_target_freq will run
+ * devfreq_dev_profile.get_dev_status() to get the
+ * status of the device (load = busy_time / total_time).
+ * If no_central_polling is set, this callback is called
+ * only with update_devfreq() notified by OPP.
+ * @init Called when the devfreq is being attached to a device
+ * @exit Called when the devfreq is being removed from a
+ * device. Governor should stop any internal routines
+ * before return because related data may be
+ * freed after exit().
+ * @no_central_polling Do not use devfreq's central polling mechanism.
+ * When this is set, devfreq will not call
+ * get_target_freq with devfreq_monitor(). However,
+ * devfreq will call get_target_freq with
+ * devfreq_update() notified by OPP framework.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+ const char name[DEVFREQ_NAME_LEN];
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*init)(struct devfreq *this);
+ void (*exit)(struct devfreq *this);
+ const bool no_central_polling;
+};
+
+/**
+ * struct devfreq - Device devfreq structure
+ * @node list node - contains the devices with devfreq that have been
+ * registered.
+ * @lock a mutex to protect accessing devfreq.
+ * @dev device registered by devfreq class. dev.parent is the device
+ * using devfreq.
+ * @profile device-specific devfreq profile
+ * @governor method how to choose frequency based on the usage.
+ * @nb notifier block used to notify devfreq object that it should
+ * reevaluate operable frequencies. Devfreq users may use
+ * devfreq.nb to the corresponding register notifier call chain.
+ * @polling_jiffies interval in jiffies.
+ * @previous_freq previously configured frequency value.
+ * @next_polling the number of remaining jiffies to poll with
+ * "devfreq_monitor" executions to reevaluate
+ * frequency/voltage of the device. Set by
+ * profile's polling_ms interval.
+ * @data Private data of the governor. The devfreq framework does not
+ * touch this.
+ * @being_removed a flag to mark that this object is being removed in
+ * order to prevent trying to remove the object multiple times.
+ *
+ * This structure stores the devfreq information for a give device.
+ *
+ * Note that when a governor accesses entries in struct devfreq in its
+ * functions except for the context of callbacks defined in struct
+ * devfreq_governor, the governor should protect its access with the
+ * struct mutex lock in struct devfreq. A governor may use this mutex
+ * to protect its own private data in void *data as well.
+ */
+struct devfreq {
+ struct list_head node;
+
+ struct mutex lock;
+ struct device dev;
+ struct devfreq_dev_profile *profile;
+ const struct devfreq_governor *governor;
+ struct notifier_block nb;
+
+ unsigned long polling_jiffies;
+ unsigned long previous_freq;
+ unsigned int next_polling;
+
+ void *data; /* private data for governors */
+
+ bool being_removed;
+};
+
+#if defined(CONFIG_PM_DEVFREQ)
+extern struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const struct devfreq_governor *governor,
+ void *data);
+extern int devfreq_remove_device(struct devfreq *devfreq);
+
+/* Helper functions for devfreq user device driver with OPP. */
+extern struct opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq);
+extern int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+extern int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+
+#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE
+extern const struct devfreq_governor devfreq_powersave;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE
+extern const struct devfreq_governor devfreq_performance;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_USERSPACE
+extern const struct devfreq_governor devfreq_userspace;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND
+extern const struct devfreq_governor devfreq_simple_ondemand;
+/**
+ * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * and devfreq_add_device
+ * @ upthreshold If the load is over this value, the frequency jumps.
+ * Specify 0 to use the default. Valid value = 0 to 100.
+ * @ downdifferential If the load is under upthreshold - downdifferential,
+ * the governor may consider slowing the frequency down.
+ * Specify 0 to use the default. Valid value = 0 to 100.
+ * downdifferential < upthreshold must hold.
+ *
+ * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor,
+ * the governor uses the default values.
+ */
+struct devfreq_simple_ondemand_data {
+ unsigned int upthreshold;
+ unsigned int downdifferential;
+};
+#endif
+
+#else /* !CONFIG_PM_DEVFREQ */
+static struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ struct devfreq_governor *governor,
+ void *data);
+{
+ return NULL;
+}
+
+static int devfreq_remove_device(struct devfreq *devfreq);
+{
+ return 0;
+}
+
+static struct opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq)
+{
+ return -EINVAL;
+}
+
+static int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+static int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+#define devfreq_powersave NULL
+#define devfreq_performance NULL
+#define devfreq_userspace NULL
+#define devfreq_simple_ondemand NULL
+
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __LINUX_DEVFREQ_H__ */
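
To make the profile/governor split above concrete, a device-driver sketch (the frequencies, polling interval, and the choice of the simple_ondemand governor are illustrative; the hardware programming is stubbed out):

#include <linux/devfreq.h>

/* Round *freq to a supported operating point and program the hardware. */
static int example_target(struct device *dev, unsigned long *freq)
{
	/* e.g. pick an OPP via devfreq_recommended_opp(dev, freq) ... */
	return 0;
}

/* Report load since the last call; the governor derives busy_time/total_time. */
static int example_get_dev_status(struct device *dev,
				  struct devfreq_dev_status *stat)
{
	stat->busy_time = 0;		/* time the device was busy */
	stat->total_time = 0;		/* total time since the last call */
	stat->current_frequency = 0;	/* current rate in Hz */
	return 0;
}

static struct devfreq_dev_profile example_profile = {
	.initial_freq = 200000000,	/* 200 MHz, illustrative */
	.polling_ms = 100,
	.target = example_target,
	.get_dev_status = example_get_dev_status,
};

/* At probe time:
 *	struct devfreq *df = devfreq_add_device(dev, &example_profile,
 *						&devfreq_simple_ondemand, NULL);
 */
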
diff --git a/include/linux/device.h b/include/linux/device.h
index c20dfbf..bdcf361 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -350,6 +350,8 @@ struct class_attribute {
char *buf);
ssize_t (*store)(struct class *class, struct class_attribute *attr,
const char *buf, size_t count);
+ const void *(*namespace)(struct class *class,
+ const struct class_attribute *attr);
};
#define CLASS_ATTR(_name, _mode, _show, _store) \
@@ -636,6 +638,11 @@ static inline void set_dev_node(struct device *dev, int node)
}
#endif
+static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
+{
+ return dev ? dev->power.subsys_data : NULL;
+}
+
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
@@ -785,6 +792,8 @@ extern const char *dev_driver_string(const struct device *dev);
#ifdef CONFIG_PRINTK
+extern int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf);
extern int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
@@ -805,6 +814,9 @@ extern int _dev_info(const struct device *dev, const char *fmt, ...)
#else
+static inline int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+ { return 0; }
static inline int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
__attribute__ ((format (printf, 3, 4)));
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bbd8661..ef90cbd 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -25,11 +25,12 @@ struct intel_iommu;
struct dmar_domain;
struct root_entry;
-extern void free_dmar_iommu(struct intel_iommu *iommu);
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
+extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
@@ -39,8 +40,11 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return 0;
}
+static inline void free_dmar_iommu(struct intel_iommu *iommu)
+{
+}
+#define dmar_disabled (1)
#endif
-extern int dmar_disabled;
#endif
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 7b776d7..a8b1a84 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -26,8 +26,13 @@
#include <linux/msi.h>
#include <linux/irqreturn.h>
+/* DMAR Flags */
+#define DMAR_INTR_REMAP 0x1
+#define DMAR_X2APIC_OPT_OUT 0x2
+
struct intel_iommu;
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
+#ifdef CONFIG_DMAR_TABLE
+extern struct acpi_table_header *dmar_tbl;
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -76,7 +81,7 @@ static inline int enable_drhd_fault_handling(void)
{
return -1;
}
-#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
+#endif /* !CONFIG_DMAR_TABLE */
struct irte {
union {
@@ -107,10 +112,10 @@ struct irte {
};
};
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
extern int intr_remapping_enabled;
extern int intr_remapping_supported(void);
-extern int enable_intr_remapping(int);
+extern int enable_intr_remapping(void);
extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int);
@@ -177,7 +182,7 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
#define intr_remapping_enabled (0)
-static inline int enable_intr_remapping(int eim)
+static inline int enable_intr_remapping(void)
{
return -1;
}
@@ -192,6 +197,11 @@ static inline int reenable_intr_remapping(int eim)
}
#endif
+enum {
+ IRQ_REMAP_XAPIC_MODE,
+ IRQ_REMAP_X2APIC_MODE,
+};
+
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
@@ -204,7 +214,7 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_rmrr_unit {
@@ -227,9 +237,26 @@ struct dmar_atsr_unit {
u8 include_all:1; /* include all ports */
};
+int dmar_parse_rmrr_atsr_dev(void);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+ struct pci_dev ***devices, u16 segment);
extern int intel_iommu_init(void);
-#else /* !CONFIG_DMAR: */
+#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
-#endif /* CONFIG_DMAR */
+static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
+{
+ return 0;
+}
+static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
+{
+ return 0;
+}
+static inline int dmar_parse_rmrr_atsr_dev(void)
+{
+ return 0;
+}
+#endif /* CONFIG_INTEL_IOMMU */
#endif /* __DMAR_H__ */
diff --git a/include/linux/drbd_tag_magic.h b/include/linux/drbd_tag_magic.h
index 0695431..81f52f2 100644
--- a/include/linux/drbd_tag_magic.h
+++ b/include/linux/drbd_tag_magic.h
@@ -28,7 +28,7 @@ enum packet_types {
#define NL_STRING(pn, pr, member, len) \
unsigned char member[len]; int member ## _len; \
int tag_and_len ## member;
-#include "linux/drbd_nl.h"
+#include <linux/drbd_nl.h>
/* declare tag-list-sizes */
static const int tag_list_sizes[] = {
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 49638ea..07261d5 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -46,7 +46,7 @@ struct dw_apb_clock_event_device *
dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
void __iomem *base, int irq, unsigned long freq);
struct dw_apb_clocksource *
-dw_apb_clocksource_init(unsigned rating, char *name, void __iomem *base,
+dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
unsigned long freq);
void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs);
void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs);
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index e747ecd..13aae80 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -1,13 +1,6 @@
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
-/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-extern long long dynamic_debug_enabled;
-extern long long dynamic_debug_enabled2;
-
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
@@ -47,26 +40,55 @@ extern int ddebug_remove_module(const char *mod_name);
extern int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
__attribute__ ((format (printf, 2, 3)));
-#define dynamic_pr_debug(fmt, ...) do { \
- static struct _ddebug descriptor \
- __used \
- __attribute__((section("__verbose"), aligned(8))) = \
- { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
- _DPRINTK_FLAGS_DEFAULT }; \
- if (unlikely(descriptor.enabled)) \
- __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
- } while (0)
-
-
-#define dynamic_dev_dbg(dev, fmt, ...) do { \
- static struct _ddebug descriptor \
- __used \
- __attribute__((section("__verbose"), aligned(8))) = \
- { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \
- _DPRINTK_FLAGS_DEFAULT }; \
- if (unlikely(descriptor.enabled)) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
- } while (0)
+struct device;
+
+extern int __dynamic_dev_dbg(struct _ddebug *descriptor,
+ const struct device *dev,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+struct net_device;
+
+extern int __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev,
+ const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ static struct _ddebug __used __aligned(8) \
+ __attribute__((section("__verbose"))) name = { \
+ .modname = KBUILD_MODNAME, \
+ .function = __func__, \
+ .filename = __FILE__, \
+ .format = (fmt), \
+ .lineno = __LINE__, \
+ .flags = _DPRINTK_FLAGS_DEFAULT, \
+ .enabled = false, \
+ }
+
+#define dynamic_pr_debug(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.enabled)) \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_dev_dbg(dev, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.enabled)) \
+ __dynamic_dev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_netdev_dbg(dev, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.enabled)) \
+ __dynamic_netdev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
#else
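
Illustrative sketch (not from the patch itself): with CONFIG_DYNAMIC_DEBUG enabled, dev_dbg() expands to dynamic_dev_dbg() above, so each callsite drops a struct _ddebug descriptor into the __verbose section and only calls the out-of-line __dynamic_dev_dbg() once the callsite has been enabled. my_report_status() is hypothetical.

#include <linux/device.h>
#include <linux/dynamic_debug.h>

static void my_report_status(struct device *dev, int status)
{
	/* Compiled in, but emitted only when this callsite is enabled. */
	dev_dbg(dev, "status changed to %d\n", status);
}

The callsite can then be switched on at run time with, for example,
echo 'func my_report_status +p' > /sys/kernel/debug/dynamic_debug/control.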
diff --git a/include/linux/evm.h b/include/linux/evm.h
new file mode 100644
index 0000000..9fc13a7
--- /dev/null
+++ b/include/linux/evm.h
@@ -0,0 +1,100 @@
+/*
+ * evm.h
+ *
+ * Copyright (c) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#ifndef _LINUX_EVM_H
+#define _LINUX_EVM_H
+
+#include <linux/integrity.h>
+#include <linux/xattr.h>
+
+struct integrity_iint_cache;
+
+#ifdef CONFIG_EVM
+extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint);
+extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
+extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size);
+extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
+extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name);
+extern int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm);
+#ifdef CONFIG_FS_POSIX_ACL
+extern int posix_xattr_acl(const char *xattrname);
+#else
+static inline int posix_xattr_acl(const char *xattrname)
+{
+ return 0;
+}
+#endif
+#else
+#ifdef CONFIG_INTEGRITY
+static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ return INTEGRITY_UNKNOWN;
+}
+#endif
+
+static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ return;
+}
+
+static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len)
+{
+ return;
+}
+
+static inline int evm_inode_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return;
+}
+
+static inline int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm)
+{
+ return 0;
+}
+
+#endif /* CONFIG_EVM */
+#endif /* _LINUX_EVM_H */
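
A minimal sketch of the intended calling pattern (not from the patch itself): a security hook asks EVM whether an xattr change is allowed before applying it, then notifies EVM afterwards so the HMAC can be updated. my_setxattr_hook() and the omitted xattr update are hypothetical.

#include <linux/fs.h>
#include <linux/evm.h>

static int my_setxattr_hook(struct dentry *dentry, const char *name,
			    const void *value, size_t size)
{
	int rc = evm_inode_setxattr(dentry, name, value, size);

	if (rc)
		return rc;	/* EVM vetoed the change */

	/* ... perform the actual xattr update here ... */

	evm_inode_post_setxattr(dentry, name, value, size);
	return 0;
}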
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 741956f..8eeb205 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -155,7 +155,7 @@ extern unsigned int sk_run_filter(const struct sk_buff *skb,
const struct sock_filter *filter);
extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
extern int sk_detach_filter(struct sock *sk);
-extern int sk_chk_filter(struct sock_filter *filter, int flen);
+extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
#ifdef CONFIG_BPF_JIT
extern void bpf_jit_compile(struct sk_filter *fp);
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 1effc8b..a49b529 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -49,6 +49,7 @@ extern int thaw_process(struct task_struct *p);
extern void refrigerator(void);
extern int freeze_processes(void);
+extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
static inline int try_to_freeze(void)
@@ -134,10 +135,25 @@ static inline void set_freezable_with_signal(void)
}
/*
- * Freezer-friendly wrappers around wait_event_interruptible() and
- * wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
+ * Freezer-friendly wrappers around wait_event_interruptible(),
+ * wait_event_killable() and wait_event_interruptible_timeout(), originally
+ * defined in <linux/wait.h>
*/
+#define wait_event_freezekillable(wq, condition) \
+({ \
+ int __retval; \
+ do { \
+ __retval = wait_event_killable(wq, \
+ (condition) || freezing(current)); \
+ if (__retval && !freezing(current)) \
+ break; \
+ else if (!(condition)) \
+ __retval = -ERESTARTSYS; \
+ } while (try_to_freeze()); \
+ __retval; \
+})
+
#define wait_event_freezable(wq, condition) \
({ \
int __retval; \
@@ -171,7 +187,8 @@ static inline void clear_freeze_flag(struct task_struct *p) {}
static inline int thaw_process(struct task_struct *p) { return 1; }
static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
+static inline int freeze_processes(void) { return -ENOSYS; }
+static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline int try_to_freeze(void) { return 0; }
@@ -188,6 +205,9 @@ static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable_timeout(wq, condition, timeout) \
wait_event_interruptible_timeout(wq, condition, timeout)
+#define wait_event_freezekillable(wq, condition) \
+ wait_event_killable(wq, condition)
+
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
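
An illustrative use of the new wrapper (not from the patch itself), roughly the pattern a network filesystem might follow while a request is outstanding: the sleep is killable, and the task can also be frozen and resume waiting afterwards. resp_waitq and resp_ready are hypothetical.

#include <linux/wait.h>
#include <linux/freezer.h>

static int my_wait_for_response(wait_queue_head_t *resp_waitq,
				bool *resp_ready)
{
	int rc;

	rc = wait_event_freezekillable(*resp_waitq, *resp_ready);
	if (rc)
		return rc;	/* woken by a fatal signal */

	return 0;
}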
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 277f497..14493a2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -58,14 +58,15 @@ struct inodes_stat_t {
#define NR_FILE 8192 /* this can well be larger on a larger system */
-#define MAY_EXEC 1
-#define MAY_WRITE 2
-#define MAY_READ 4
-#define MAY_APPEND 8
-#define MAY_ACCESS 16
-#define MAY_OPEN 32
-#define MAY_CHDIR 64
-#define MAY_NOT_BLOCK 128 /* called from RCU mode, don't block */
+#define MAY_EXEC 0x00000001
+#define MAY_WRITE 0x00000002
+#define MAY_READ 0x00000004
+#define MAY_APPEND 0x00000008
+#define MAY_ACCESS 0x00000010
+#define MAY_OPEN 0x00000020
+#define MAY_CHDIR 0x00000040
+/* called from RCU mode, don't block */
+#define MAY_NOT_BLOCK 0x00000080
/*
* flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
@@ -963,7 +964,12 @@ struct file {
#define f_dentry f_path.dentry
#define f_vfsmnt f_path.mnt
const struct file_operations *f_op;
- spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */
+
+ /*
+ * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR.
+ * Must not be taken from IRQ context.
+ */
+ spinlock_t f_lock;
#ifdef CONFIG_SMP
int f_sb_list_cpu;
#endif
@@ -1063,6 +1069,8 @@ static inline int file_check_writeable(struct file *filp)
#define FL_LEASE 32 /* lease held on this file */
#define FL_CLOSE 64 /* unlock on close */
#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
@@ -1109,7 +1117,7 @@ struct file_lock {
struct list_head fl_link; /* doubly linked list of all locks */
struct list_head fl_block; /* circular list of blocked processes */
fl_owner_t fl_owner;
- unsigned char fl_flags;
+ unsigned int fl_flags;
unsigned char fl_type;
unsigned int fl_pid;
struct pid *fl_nspid;
@@ -1119,7 +1127,9 @@ struct file_lock {
loff_t fl_end;
struct fasync_struct * fl_fasync; /* for lease break notifications */
- unsigned long fl_break_time; /* for nonblocking lease breaks */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
@@ -2397,8 +2407,8 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
-extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
- int origin);
+extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
+ int origin, loff_t maxsize);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
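
Illustrative sketch (not from the patch itself) of the replacement helper: a filesystem that cannot address offsets beyond its own limit passes that limit to generic_file_llseek_size() instead of relying on the old generic_file_llseek_unlocked() bounds against s_maxbytes. The 4 GiB limit and myfs_llseek() are placeholders.

#include <linux/fs.h>

/* Hypothetical per-filesystem limit, chosen purely for illustration. */
#define MYFS_MAX_FILE_SIZE	(1ULL << 32)

static loff_t myfs_llseek(struct file *file, loff_t offset, int origin)
{
	return generic_file_llseek_size(file, offset, origin,
					MYFS_MAX_FILE_SIZE);
}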
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 9cf8e7a..deed5f9 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -71,6 +71,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/input.h>
+#include <linux/semaphore.h>
/*
* We parse each description item into this structure. Short items data
@@ -312,6 +313,7 @@ struct hid_item {
#define HID_QUIRK_BADPAD 0x00000020
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
+#define HID_QUIRK_MULTITOUCH 0x00000100
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
@@ -475,6 +477,7 @@ struct hid_device { /* device report descriptor */
unsigned country; /* HID country */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
+ struct semaphore driver_lock; /* protects the current driver */
struct device dev; /* device */
struct hid_driver *driver;
struct hid_ll_driver *ll_driver;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
new file mode 100644
index 0000000..12ec328
--- /dev/null
+++ b/include/linux/hyperv.h
@@ -0,0 +1,873 @@
+/*
+ *
+ * Copyright (c) 2011, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ *
+ */
+
+#ifndef _HYPERV_H
+#define _HYPERV_H
+
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/uuid.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+
+#define MAX_PAGE_BUFFER_COUNT 16
+#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
+
+#pragma pack(push, 1)
+
+/* Single-page buffer */
+struct hv_page_buffer {
+ u32 len;
+ u32 offset;
+ u64 pfn;
+};
+
+/* Multiple-page buffer */
+struct hv_multipage_buffer {
+ /* Length and Offset determine the # of pfns in the array */
+ u32 len;
+ u32 offset;
+ u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
+};
+
+/* 0x18 includes the proprietary packet header */
+#define MAX_PAGE_BUFFER_PACKET (0x18 + \
+ (sizeof(struct hv_page_buffer) * \
+ MAX_PAGE_BUFFER_COUNT))
+#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
+ sizeof(struct hv_multipage_buffer))
+
+
+#pragma pack(pop)
+
+struct hv_ring_buffer {
+ /* Offset in bytes from the start of ring data below */
+ u32 write_index;
+
+ /* Offset in bytes from the start of ring data below */
+ u32 read_index;
+
+ u32 interrupt_mask;
+
+ /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ u8 reserved[4084];
+
+ /* NOTE:
+ * The interrupt_mask field is used only for channels but since our
+ * vmbus connection also uses this data structure and its data starts
+ * here, we commented out this field.
+ */
+
+ /*
+ * Ring data starts here + RingDataStartOffset
+ * !!! DO NOT place any fields below this !!!
+ */
+ u8 buffer[0];
+} __packed;
+
+struct hv_ring_buffer_info {
+ struct hv_ring_buffer *ring_buffer;
+ u32 ring_size; /* Include the shared header */
+ spinlock_t ring_lock;
+
+ u32 ring_datasize; /* < ring_size */
+ u32 ring_data_startoffset;
+};
+
+struct hv_ring_buffer_debug_info {
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
+
+/*
+ * We use the same version numbering for all Hyper-V modules.
+ *
+ * Definition of versioning is as follows:
+ *
+ * The Major number changes when:
+ * 1. A new version of Windows Hyper-V is released.
+ * 2. A major change has occurred in the Linux ICs
+ *    (for example, the first merge into the kernel).
+ * Every time the Major number changes, the Revision
+ * number is reset to 0.
+ * The Minor number changes when new functionality is
+ * added to the Linux ICs that is not a bug fix.
+ *
+ * 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync
+ */
+#define HV_DRV_VERSION "3.1"
+
+
+/*
+ * A revision number of vmbus that is used for ensuring both ends on a
+ * partition are using compatible versions.
+ */
+#define VMBUS_REVISION_NUMBER 13
+
+/* Make maximum size of pipe payload of 16K */
+#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
+
+/* Define PipeMode values. */
+#define VMBUS_PIPE_TYPE_BYTE 0x00000000
+#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
+
+/* The size of the user defined data buffer for non-pipe offers. */
+#define MAX_USER_DEFINED_BYTES 120
+
+/* The size of the user defined data buffer for pipe offers. */
+#define MAX_PIPE_USER_DEFINED_BYTES 116
+
+/*
+ * At the center of the Channel Management library is the Channel Offer. This
+ * struct contains the fundamental information about an offer.
+ */
+struct vmbus_channel_offer {
+ uuid_le if_type;
+ uuid_le if_instance;
+ u64 int_latency; /* in 100ns units */
+ u32 if_revision;
+ u32 server_ctx_size; /* in bytes */
+ u16 chn_flags;
+ u16 mmio_megabytes; /* in bytes * 1024 * 1024 */
+
+ union {
+ /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
+ struct {
+ unsigned char user_def[MAX_USER_DEFINED_BYTES];
+ } std;
+
+ /*
+ * Pipes:
+ * The following structure is an integrated pipe protocol, which
+ * is implemented on top of standard user-defined data. Pipe
+ * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
+ * use.
+ */
+ struct {
+ u32 pipe_mode;
+ unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
+ } pipe;
+ } u;
+ u32 padding;
+} __packed;
+
+/* Server Flags */
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
+
+struct vmpacket_descriptor {
+ u16 type;
+ u16 offset8;
+ u16 len8;
+ u16 flags;
+ u64 trans_id;
+} __packed;
+
+struct vmpacket_header {
+ u32 prev_pkt_start_offset;
+ struct vmpacket_descriptor descriptor;
+} __packed;
+
+struct vmtransfer_page_range {
+ u32 byte_count;
+ u32 byte_offset;
+} __packed;
+
+struct vmtransfer_page_packet_header {
+ struct vmpacket_descriptor d;
+ u16 xfer_pageset_id;
+ bool sender_owns_set;
+ u8 reserved;
+ u32 range_cnt;
+ struct vmtransfer_page_range ranges[1];
+} __packed;
+
+struct vmgpadl_packet_header {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u32 reserved;
+} __packed;
+
+struct vmadd_remove_transfer_page_set {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u16 xfer_pageset_id;
+ u16 reserved;
+} __packed;
+
+/*
+ * This structure defines a range in guest physical space that can be made to
+ * look virtually contiguous.
+ */
+struct gpa_range {
+ u32 byte_count;
+ u32 byte_offset;
+ u64 pfn_array[0];
+};
+
+/*
+ * This is the format for an Establish Gpadl packet, which contains a handle by
+ * which this GPADL will be known and a set of GPA ranges associated with it.
+ * This can be converted to a MDL by the guest OS. If there are multiple GPA
+ * ranges, then the resulting MDL will be "chained," representing multiple VA
+ * ranges.
+ */
+struct vmestablish_gpadl {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u32 range_cnt;
+ struct gpa_range range[1];
+} __packed;
+
+/*
+ * This is the format for a Teardown Gpadl packet, which indicates that the
+ * GPADL handle in the Establish Gpadl packet will never be referenced again.
+ */
+struct vmteardown_gpadl {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u32 reserved; /* for alignment to a 8-byte boundary */
+} __packed;
+
+/*
+ * This is the format for a GPA-Direct packet, which contains a set of GPA
+ * ranges, in addition to commands and/or data.
+ */
+struct vmdata_gpa_direct {
+ struct vmpacket_descriptor d;
+ u32 reserved;
+ u32 range_cnt;
+ struct gpa_range range[1];
+} __packed;
+
+/* This is the format for an Additional Data Packet. */
+struct vmadditional_data {
+ struct vmpacket_descriptor d;
+ u64 total_bytes;
+ u32 offset;
+ u32 byte_cnt;
+ unsigned char data[1];
+} __packed;
+
+union vmpacket_largest_possible_header {
+ struct vmpacket_descriptor simple_hdr;
+ struct vmtransfer_page_packet_header xfer_page_hdr;
+ struct vmgpadl_packet_header gpadl_hdr;
+ struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
+ struct vmestablish_gpadl establish_gpadl_hdr;
+ struct vmteardown_gpadl teardown_gpadl_hdr;
+ struct vmdata_gpa_direct data_gpa_direct_hdr;
+};
+
+#define VMPACKET_DATA_START_ADDRESS(__packet) \
+ (void *)(((unsigned char *)__packet) + \
+ ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
+
+#define VMPACKET_DATA_LENGTH(__packet) \
+ ((((struct vmpacket_descriptor *)__packet)->len8 - \
+ ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
+
+#define VMPACKET_TRANSFER_MODE(__packet) \
+ (((struct vmpacket_descriptor *)__packet)->type)
+
+enum vmbus_packet_type {
+ VM_PKT_INVALID = 0x0,
+ VM_PKT_SYNCH = 0x1,
+ VM_PKT_ADD_XFER_PAGESET = 0x2,
+ VM_PKT_RM_XFER_PAGESET = 0x3,
+ VM_PKT_ESTABLISH_GPADL = 0x4,
+ VM_PKT_TEARDOWN_GPADL = 0x5,
+ VM_PKT_DATA_INBAND = 0x6,
+ VM_PKT_DATA_USING_XFER_PAGES = 0x7,
+ VM_PKT_DATA_USING_GPADL = 0x8,
+ VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
+ VM_PKT_CANCEL_REQUEST = 0xa,
+ VM_PKT_COMP = 0xb,
+ VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
+ VM_PKT_ADDITIONAL_DATA = 0xd
+};
+
+#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
+
+
+/* Version 1 messages */
+enum vmbus_channel_message_type {
+ CHANNELMSG_INVALID = 0,
+ CHANNELMSG_OFFERCHANNEL = 1,
+ CHANNELMSG_RESCIND_CHANNELOFFER = 2,
+ CHANNELMSG_REQUESTOFFERS = 3,
+ CHANNELMSG_ALLOFFERS_DELIVERED = 4,
+ CHANNELMSG_OPENCHANNEL = 5,
+ CHANNELMSG_OPENCHANNEL_RESULT = 6,
+ CHANNELMSG_CLOSECHANNEL = 7,
+ CHANNELMSG_GPADL_HEADER = 8,
+ CHANNELMSG_GPADL_BODY = 9,
+ CHANNELMSG_GPADL_CREATED = 10,
+ CHANNELMSG_GPADL_TEARDOWN = 11,
+ CHANNELMSG_GPADL_TORNDOWN = 12,
+ CHANNELMSG_RELID_RELEASED = 13,
+ CHANNELMSG_INITIATE_CONTACT = 14,
+ CHANNELMSG_VERSION_RESPONSE = 15,
+ CHANNELMSG_UNLOAD = 16,
+#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
+ CHANNELMSG_VIEWRANGE_ADD = 17,
+ CHANNELMSG_VIEWRANGE_REMOVE = 18,
+#endif
+ CHANNELMSG_COUNT
+};
+
+struct vmbus_channel_message_header {
+ enum vmbus_channel_message_type msgtype;
+ u32 padding;
+} __packed;
+
+/* Query VMBus Version parameters */
+struct vmbus_channel_query_vmbus_version {
+ struct vmbus_channel_message_header header;
+ u32 version;
+} __packed;
+
+/* VMBus Version Supported parameters */
+struct vmbus_channel_version_supported {
+ struct vmbus_channel_message_header header;
+ bool version_supported;
+} __packed;
+
+/* Offer Channel parameters */
+struct vmbus_channel_offer_channel {
+ struct vmbus_channel_message_header header;
+ struct vmbus_channel_offer offer;
+ u32 child_relid;
+ u8 monitorid;
+ bool monitor_allocated;
+} __packed;
+
+/* Rescind Offer parameters */
+struct vmbus_channel_rescind_offer {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+} __packed;
+
+/*
+ * Request Offer -- no parameters, SynIC message contains the partition ID
+ * Set Snoop -- no parameters, SynIC message contains the partition ID
+ * Clear Snoop -- no parameters, SynIC message contains the partition ID
+ * All Offers Delivered -- no parameters, SynIC message contains the partition
+ * ID
+ * Flush Client -- no parameters, SynIC message contains the partition ID
+ */
+
+/* Open Channel parameters */
+struct vmbus_channel_open_channel {
+ struct vmbus_channel_message_header header;
+
+ /* Identifies the specific VMBus channel that is being opened. */
+ u32 child_relid;
+
+ /* ID making a particular open request at a channel offer unique. */
+ u32 openid;
+
+ /* GPADL for the channel's ring buffer. */
+ u32 ringbuffer_gpadlhandle;
+
+ /* GPADL for the channel's server context save area. */
+ u32 server_contextarea_gpadlhandle;
+
+ /*
+ * The upstream ring buffer begins at offset zero in the memory
+ * described by RingBufferGpadlHandle. The downstream ring buffer
+ * follows it at this offset (in pages).
+ */
+ u32 downstream_ringbuffer_pageoffset;
+
+ /* User-specific data to be passed along to the server endpoint. */
+ unsigned char userdata[MAX_USER_DEFINED_BYTES];
+} __packed;
+
+/* Open Channel Result parameters */
+struct vmbus_channel_open_result {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 openid;
+ u32 status;
+} __packed;
+
+/* Close channel parameters; */
+struct vmbus_channel_close_channel {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+} __packed;
+
+/* Channel Message GPADL */
+#define GPADL_TYPE_RING_BUFFER 1
+#define GPADL_TYPE_SERVER_SAVE_AREA 2
+#define GPADL_TYPE_TRANSACTION 8
+
+/*
+ * The number of PFNs in a GPADL message is defined by the number of
+ * pages that would be spanned by ByteCount and ByteOffset. If the
+ * implied number of PFNs won't fit in this packet, there will be a
+ * follow-up packet that contains more.
+ */
+struct vmbus_channel_gpadl_header {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+ u16 range_buflen;
+ u16 rangecount;
+ struct gpa_range range[0];
+} __packed;
+
+/* This is the followup packet that contains more PFNs. */
+struct vmbus_channel_gpadl_body {
+ struct vmbus_channel_message_header header;
+ u32 msgnumber;
+ u32 gpadl;
+ u64 pfn[0];
+} __packed;
+
+struct vmbus_channel_gpadl_created {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+ u32 creation_status;
+} __packed;
+
+struct vmbus_channel_gpadl_teardown {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+} __packed;
+
+struct vmbus_channel_gpadl_torndown {
+ struct vmbus_channel_message_header header;
+ u32 gpadl;
+} __packed;
+
+#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
+struct vmbus_channel_view_range_add {
+ struct vmbus_channel_message_header header;
+ PHYSICAL_ADDRESS viewrange_base;
+ u64 viewrange_length;
+ u32 child_relid;
+} __packed;
+
+struct vmbus_channel_view_range_remove {
+ struct vmbus_channel_message_header header;
+ PHYSICAL_ADDRESS viewrange_base;
+ u32 child_relid;
+} __packed;
+#endif
+
+struct vmbus_channel_relid_released {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+} __packed;
+
+struct vmbus_channel_initiate_contact {
+ struct vmbus_channel_message_header header;
+ u32 vmbus_version_requested;
+ u32 padding2;
+ u64 interrupt_page;
+ u64 monitor_page1;
+ u64 monitor_page2;
+} __packed;
+
+struct vmbus_channel_version_response {
+ struct vmbus_channel_message_header header;
+ bool version_supported;
+} __packed;
+
+enum vmbus_channel_state {
+ CHANNEL_OFFER_STATE,
+ CHANNEL_OPENING_STATE,
+ CHANNEL_OPEN_STATE,
+};
+
+struct vmbus_channel_debug_info {
+ u32 relid;
+ enum vmbus_channel_state state;
+ uuid_le interfacetype;
+ uuid_le interface_instance;
+ u32 monitorid;
+ u32 servermonitor_pending;
+ u32 servermonitor_latency;
+ u32 servermonitor_connectionid;
+ u32 clientmonitor_pending;
+ u32 clientmonitor_latency;
+ u32 clientmonitor_connectionid;
+
+ struct hv_ring_buffer_debug_info inbound;
+ struct hv_ring_buffer_debug_info outbound;
+};
+
+/*
+ * Represents each channel msg on the vmbus connection. This is a
+ * variable-size data structure whose layout depends on the msg type.
+ */
+struct vmbus_channel_msginfo {
+ /* Bookkeeping stuff */
+ struct list_head msglistentry;
+
+ /* So far, this is only used to handle gpadl body message */
+ struct list_head submsglist;
+
+ /* Synchronize the request/response if needed */
+ struct completion waitevent;
+ union {
+ struct vmbus_channel_version_supported version_supported;
+ struct vmbus_channel_open_result open_result;
+ struct vmbus_channel_gpadl_torndown gpadl_torndown;
+ struct vmbus_channel_gpadl_created gpadl_created;
+ struct vmbus_channel_version_response version_response;
+ } response;
+
+ u32 msgsize;
+ /*
+ * The channel message that goes out on the "wire".
+ * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
+ */
+ unsigned char msg[0];
+};
+
+struct vmbus_close_msg {
+ struct vmbus_channel_msginfo info;
+ struct vmbus_channel_close_channel msg;
+};
+
+struct vmbus_channel {
+ struct list_head listentry;
+
+ struct hv_device *device_obj;
+
+ struct work_struct work;
+
+ enum vmbus_channel_state state;
+
+ struct vmbus_channel_offer_channel offermsg;
+ /*
+ * These are based on the OfferMsg.MonitorId.
+ * Save it here for easy access.
+ */
+ u8 monitor_grp;
+ u8 monitor_bit;
+
+ u32 ringbuffer_gpadlhandle;
+
+ /* Allocated memory for ring buffer */
+ void *ringbuffer_pages;
+ u32 ringbuffer_pagecount;
+ struct hv_ring_buffer_info outbound; /* send to parent */
+ struct hv_ring_buffer_info inbound; /* receive from parent */
+ spinlock_t inbound_lock;
+ struct workqueue_struct *controlwq;
+
+ struct vmbus_close_msg close_msg;
+
+ /* Channel callbacks are invoked in this workqueue context */
+ /* HANDLE dataWorkQueue; */
+
+ void (*onchannel_callback)(void *context);
+ void *channel_callback_context;
+};
+
+void vmbus_onmessage(void *context);
+
+int vmbus_request_offers(void);
+
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_channel_packet_page_buffer {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount;
+ struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
+} __packed;
+
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_channel_packet_multipage_buffer {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount; /* Always 1 in this case */
+ struct hv_multipage_buffer range;
+} __packed;
+
+
+extern int vmbus_open(struct vmbus_channel *channel,
+ u32 send_ringbuffersize,
+ u32 recv_ringbuffersize,
+ void *userdata,
+ u32 userdatalen,
+ void(*onchannel_callback)(void *context),
+ void *context);
+
+extern void vmbus_close(struct vmbus_channel *channel);
+
+extern int vmbus_sendpacket(struct vmbus_channel *channel,
+ const void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ enum vmbus_packet_type type,
+ u32 flags);
+
+extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
+extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
+ struct hv_multipage_buffer *mpb,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
+extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
+ void *kbuffer,
+ u32 size,
+ u32 *gpadl_handle);
+
+extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
+ u32 gpadl_handle);
+
+extern int vmbus_recvpacket(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferlen,
+ u32 *buffer_actual_len,
+ u64 *requestid);
+
+extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferlen,
+ u32 *buffer_actual_len,
+ u64 *requestid);
+
+
+extern void vmbus_get_debug_info(struct vmbus_channel *channel,
+ struct vmbus_channel_debug_info *debug);
+
+extern void vmbus_ontimer(unsigned long data);
+
+struct hv_dev_port_info {
+ u32 int_mask;
+ u32 read_idx;
+ u32 write_idx;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
+
+/* Base driver object */
+struct hv_driver {
+ const char *name;
+
+ /* the device type supported by this driver */
+ uuid_le dev_type;
+ const struct hv_vmbus_device_id *id_table;
+
+ struct device_driver driver;
+
+ int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
+ int (*remove)(struct hv_device *);
+ void (*shutdown)(struct hv_device *);
+
+};
+
+/* Base device object */
+struct hv_device {
+ /* the device type id of this device */
+ uuid_le dev_type;
+
+ /* the device instance id of this device */
+ uuid_le dev_instance;
+
+ struct device device;
+
+ struct vmbus_channel *channel;
+};
+
+
+static inline struct hv_device *device_to_hv_device(struct device *d)
+{
+ return container_of(d, struct hv_device, device);
+}
+
+static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
+{
+ return container_of(d, struct hv_driver, driver);
+}
+
+static inline void hv_set_drvdata(struct hv_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->device, data);
+}
+
+static inline void *hv_get_drvdata(struct hv_device *dev)
+{
+ return dev_get_drvdata(&dev->device);
+}
+
+/* Vmbus interface */
+#define vmbus_driver_register(driver) \
+ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
+ struct module *owner,
+ const char *mod_name);
+void vmbus_driver_unregister(struct hv_driver *hv_driver);
+
+/**
+ * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
+ *
+ * This macro is used to create a struct hv_vmbus_device_id that matches a
+ * specific device.
+ */
+#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \
+ g8, g9, ga, gb, gc, gd, ge, gf) \
+ .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \
+ g8, g9, ga, gb, gc, gd, ge, gf },
+
+/*
+ * Common header for Hyper-V ICs
+ */
+
+#define ICMSGTYPE_NEGOTIATE 0
+#define ICMSGTYPE_HEARTBEAT 1
+#define ICMSGTYPE_KVPEXCHANGE 2
+#define ICMSGTYPE_SHUTDOWN 3
+#define ICMSGTYPE_TIMESYNC 4
+#define ICMSGTYPE_VSS 5
+
+#define ICMSGHDRFLAG_TRANSACTION 1
+#define ICMSGHDRFLAG_REQUEST 2
+#define ICMSGHDRFLAG_RESPONSE 4
+
+#define HV_S_OK 0x00000000
+#define HV_E_FAIL 0x80004005
+#define HV_ERROR_NOT_SUPPORTED 0x80070032
+#define HV_ERROR_MACHINE_LOCKED 0x800704F7
+
+/*
+ * While we want to handle util services as regular devices,
+ * there is only one instance of each of these services; so
+ * we statically allocate the service specific state.
+ */
+
+struct hv_util_service {
+ u8 *recv_buffer;
+ void (*util_cb)(void *);
+ int (*util_init)(struct hv_util_service *);
+ void (*util_deinit)(void);
+};
+
+struct vmbuspipe_hdr {
+ u32 flags;
+ u32 msgsize;
+} __packed;
+
+struct ic_version {
+ u16 major;
+ u16 minor;
+} __packed;
+
+struct icmsg_hdr {
+ struct ic_version icverframe;
+ u16 icmsgtype;
+ struct ic_version icvermsg;
+ u16 icmsgsize;
+ u32 status;
+ u8 ictransaction_id;
+ u8 icflags;
+ u8 reserved[2];
+} __packed;
+
+struct icmsg_negotiate {
+ u16 icframe_vercnt;
+ u16 icmsg_vercnt;
+ u32 reserved;
+ struct ic_version icversion_data[1]; /* any size array */
+} __packed;
+
+struct shutdown_msg_data {
+ u32 reason_code;
+ u32 timeout_seconds;
+ u32 flags;
+ u8 display_message[2048];
+} __packed;
+
+struct heartbeat_msg_data {
+ u64 seq_num;
+ u32 reserved[8];
+} __packed;
+
+/* Time Sync IC defs */
+#define ICTIMESYNCFLAG_PROBE 0
+#define ICTIMESYNCFLAG_SYNC 1
+#define ICTIMESYNCFLAG_SAMPLE 2
+
+#ifdef __x86_64__
+#define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
+#else
+#define WLTIMEDELTA 116444736000000000LL
+#endif
+
+struct ictimesync_data {
+ u64 parenttime;
+ u64 childtime;
+ u64 roundtriptime;
+ u8 flags;
+} __packed;
+
+struct hyperv_service_callback {
+ u8 msg_type;
+ char *log_msg;
+ uuid_le data;
+ struct vmbus_channel *channel;
+ void (*callback) (void *context);
+};
+
+extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
+ struct icmsg_negotiate *, u8 *);
+
+#endif /* _HYPERV_H */
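
Illustrative skeleton of a vmbus driver using the interfaces declared above (not from the patch itself). The GUID, ring sizes and my_* names are placeholders, not a real Hyper-V device class.

#include <linux/module.h>
#include <linux/hyperv.h>

#define MY_RING_SIZE	(4 * PAGE_SIZE)

static void my_channel_callback(void *context)
{
	struct hv_device *dev = context;
	u8 buf[256];
	u32 actual;
	u64 reqid;

	/* Drain the inbound ring; a real driver would parse each packet. */
	while (vmbus_recvpacket(dev->channel, buf, sizeof(buf),
				&actual, &reqid) == 0 && actual)
		;
}

static int my_probe(struct hv_device *dev,
		    const struct hv_vmbus_device_id *dev_id)
{
	return vmbus_open(dev->channel, MY_RING_SIZE, MY_RING_SIZE,
			  NULL, 0, my_channel_callback, dev);
}

static int my_remove(struct hv_device *dev)
{
	vmbus_close(dev->channel);
	return 0;
}

static const struct hv_vmbus_device_id my_id_table[] = {
	/* Placeholder GUID, for illustration only. */
	{ VMBUS_DEVICE(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		       0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10) },
	{ },
};

static struct hv_driver my_hv_driver = {
	.name = "my_hv_example",
	.id_table = my_id_table,
	.probe = my_probe,
	.remove = my_remove,
};

static int __init my_init(void)
{
	return vmbus_driver_register(&my_hv_driver);
}

static void __exit my_exit(void)
{
	vmbus_driver_unregister(&my_hv_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");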
diff --git a/include/linux/i2c/tsc2007.h b/include/linux/i2c/tsc2007.h
index 591427a..506a9f7 100644
--- a/include/linux/i2c/tsc2007.h
+++ b/include/linux/i2c/tsc2007.h
@@ -5,7 +5,7 @@
struct tsc2007_platform_data {
u16 model; /* 2007. */
- u16 x_plate_ohms;
+ u16 x_plate_ohms; /* must be non-zero value */
u16 max_rt; /* max. resistance above which samples are ignored */
unsigned long poll_delay; /* delay (in ms) after pen-down event
before polling starts */
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index e28b2e4..d103dca 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -64,6 +64,7 @@ struct macvlan_dev {
int (*forward)(struct net_device *dev, struct sk_buff *skb);
struct macvtap_queue *taps[MAX_MACVTAP_QUEUES];
int numvtaps;
+ int minor;
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 09e6e62..6ac8e50 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,8 +15,6 @@ struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_inode_alloc(struct inode *inode);
-extern void ima_inode_free(struct inode *inode);
extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
@@ -27,16 +25,6 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_inode_alloc(struct inode *inode)
-{
- return 0;
-}
-
-static inline void ima_inode_free(struct inode *inode)
-{
- return;
-}
-
static inline int ima_file_check(struct file *file, int mask)
{
return 0;
@@ -51,6 +39,5 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
{
return 0;
}
-
#endif /* CONFIG_IMA_H */
#endif /* _LINUX_IMA_H */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d14e058..08ffab0 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
.cputimer = { \
.cputime = INIT_CPUTIME, \
.running = 0, \
- .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
}, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
diff --git a/include/linux/input.h b/include/linux/input.h
index a637e78..6d5eddb 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -505,6 +505,7 @@ struct input_keymap_entry {
#define BTN_TOOL_FINGER 0x145
#define BTN_TOOL_MOUSE 0x146
#define BTN_TOOL_LENS 0x147
+#define BTN_TOOL_QUINTTAP 0x148 /* Five fingers on trackpad */
#define BTN_TOUCH 0x14a
#define BTN_STYLUS 0x14b
#define BTN_STYLUS2 0x14c
@@ -1609,7 +1610,7 @@ struct ff_device {
struct file *effect_owners[];
};
-int input_ff_create(struct input_dev *dev, int max_effects);
+int input_ff_create(struct input_dev *dev, unsigned int max_effects);
void input_ff_destroy(struct input_dev *dev);
int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
index ef792ec..1a05eee 100644
--- a/include/linux/input/adp5589.h
+++ b/include/linux/input/adp5589.h
@@ -1,5 +1,5 @@
/*
- * Analog Devices ADP5589 I/O Expander and QWERTY Keypad Controller
+ * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller
*
* Copyright 2010-2011 Analog Devices Inc.
*
@@ -9,89 +9,9 @@
#ifndef _ADP5589_H
#define _ADP5589_H
-#define ADP5589_ID 0x00
-#define ADP5589_INT_STATUS 0x01
-#define ADP5589_STATUS 0x02
-#define ADP5589_FIFO_1 0x03
-#define ADP5589_FIFO_2 0x04
-#define ADP5589_FIFO_3 0x05
-#define ADP5589_FIFO_4 0x06
-#define ADP5589_FIFO_5 0x07
-#define ADP5589_FIFO_6 0x08
-#define ADP5589_FIFO_7 0x09
-#define ADP5589_FIFO_8 0x0A
-#define ADP5589_FIFO_9 0x0B
-#define ADP5589_FIFO_10 0x0C
-#define ADP5589_FIFO_11 0x0D
-#define ADP5589_FIFO_12 0x0E
-#define ADP5589_FIFO_13 0x0F
-#define ADP5589_FIFO_14 0x10
-#define ADP5589_FIFO_15 0x11
-#define ADP5589_FIFO_16 0x12
-#define ADP5589_GPI_INT_STAT_A 0x13
-#define ADP5589_GPI_INT_STAT_B 0x14
-#define ADP5589_GPI_INT_STAT_C 0x15
-#define ADP5589_GPI_STATUS_A 0x16
-#define ADP5589_GPI_STATUS_B 0x17
-#define ADP5589_GPI_STATUS_C 0x18
-#define ADP5589_RPULL_CONFIG_A 0x19
-#define ADP5589_RPULL_CONFIG_B 0x1A
-#define ADP5589_RPULL_CONFIG_C 0x1B
-#define ADP5589_RPULL_CONFIG_D 0x1C
-#define ADP5589_RPULL_CONFIG_E 0x1D
-#define ADP5589_GPI_INT_LEVEL_A 0x1E
-#define ADP5589_GPI_INT_LEVEL_B 0x1F
-#define ADP5589_GPI_INT_LEVEL_C 0x20
-#define ADP5589_GPI_EVENT_EN_A 0x21
-#define ADP5589_GPI_EVENT_EN_B 0x22
-#define ADP5589_GPI_EVENT_EN_C 0x23
-#define ADP5589_GPI_INTERRUPT_EN_A 0x24
-#define ADP5589_GPI_INTERRUPT_EN_B 0x25
-#define ADP5589_GPI_INTERRUPT_EN_C 0x26
-#define ADP5589_DEBOUNCE_DIS_A 0x27
-#define ADP5589_DEBOUNCE_DIS_B 0x28
-#define ADP5589_DEBOUNCE_DIS_C 0x29
-#define ADP5589_GPO_DATA_OUT_A 0x2A
-#define ADP5589_GPO_DATA_OUT_B 0x2B
-#define ADP5589_GPO_DATA_OUT_C 0x2C
-#define ADP5589_GPO_OUT_MODE_A 0x2D
-#define ADP5589_GPO_OUT_MODE_B 0x2E
-#define ADP5589_GPO_OUT_MODE_C 0x2F
-#define ADP5589_GPIO_DIRECTION_A 0x30
-#define ADP5589_GPIO_DIRECTION_B 0x31
-#define ADP5589_GPIO_DIRECTION_C 0x32
-#define ADP5589_UNLOCK1 0x33
-#define ADP5589_UNLOCK2 0x34
-#define ADP5589_EXT_LOCK_EVENT 0x35
-#define ADP5589_UNLOCK_TIMERS 0x36
-#define ADP5589_LOCK_CFG 0x37
-#define ADP5589_RESET1_EVENT_A 0x38
-#define ADP5589_RESET1_EVENT_B 0x39
-#define ADP5589_RESET1_EVENT_C 0x3A
-#define ADP5589_RESET2_EVENT_A 0x3B
-#define ADP5589_RESET2_EVENT_B 0x3C
-#define ADP5589_RESET_CFG 0x3D
-#define ADP5589_PWM_OFFT_LOW 0x3E
-#define ADP5589_PWM_OFFT_HIGH 0x3F
-#define ADP5589_PWM_ONT_LOW 0x40
-#define ADP5589_PWM_ONT_HIGH 0x41
-#define ADP5589_PWM_CFG 0x42
-#define ADP5589_CLOCK_DIV_CFG 0x43
-#define ADP5589_LOGIC_1_CFG 0x44
-#define ADP5589_LOGIC_2_CFG 0x45
-#define ADP5589_LOGIC_FF_CFG 0x46
-#define ADP5589_LOGIC_INT_EVENT_EN 0x47
-#define ADP5589_POLL_PTIME_CFG 0x48
-#define ADP5589_PIN_CONFIG_A 0x49
-#define ADP5589_PIN_CONFIG_B 0x4A
-#define ADP5589_PIN_CONFIG_C 0x4B
-#define ADP5589_PIN_CONFIG_D 0x4C
-#define ADP5589_GENERAL_CFG 0x4D
-#define ADP5589_INT_EN 0x4E
-
-#define ADP5589_DEVICE_ID_MASK 0xF
-
-/* Put one of these structures in i2c_board_info platform_data */
+/*
+ * ADP5589 specific GPI and Keymap defines
+ */
#define ADP5589_KEYMAPSIZE 88
@@ -127,6 +47,35 @@
#define ADP5589_GPIMAPSIZE_MAX (ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1)
+/*
+ * ADP5585 specific GPI and Keymap defines
+ */
+
+#define ADP5585_KEYMAPSIZE 30
+
+#define ADP5585_GPI_PIN_ROW0 37
+#define ADP5585_GPI_PIN_ROW1 38
+#define ADP5585_GPI_PIN_ROW2 39
+#define ADP5585_GPI_PIN_ROW3 40
+#define ADP5585_GPI_PIN_ROW4 41
+#define ADP5585_GPI_PIN_ROW5 42
+#define ADP5585_GPI_PIN_COL0 43
+#define ADP5585_GPI_PIN_COL1 44
+#define ADP5585_GPI_PIN_COL2 45
+#define ADP5585_GPI_PIN_COL3 46
+#define ADP5585_GPI_PIN_COL4 47
+#define GPI_LOGIC 48
+
+#define ADP5585_GPI_PIN_ROW_BASE ADP5585_GPI_PIN_ROW0
+#define ADP5585_GPI_PIN_ROW_END ADP5585_GPI_PIN_ROW5
+#define ADP5585_GPI_PIN_COL_BASE ADP5585_GPI_PIN_COL0
+#define ADP5585_GPI_PIN_COL_END ADP5585_GPI_PIN_COL4
+
+#define ADP5585_GPI_PIN_BASE ADP5585_GPI_PIN_ROW_BASE
+#define ADP5585_GPI_PIN_END ADP5585_GPI_PIN_COL_END
+
+#define ADP5585_GPIMAPSIZE_MAX (ADP5585_GPI_PIN_END - ADP5585_GPI_PIN_BASE + 1)
+
struct adp5589_gpi_map {
unsigned short pin;
unsigned short sw_evt;
@@ -159,7 +108,7 @@ struct adp5589_gpi_map {
#define RESET2_POL_HIGH (1 << 7)
#define RESET2_POL_LOW (0 << 7)
-/* Mask Bits:
+/* ADP5589 Mask Bits:
* C C C C C C C C C C C | R R R R R R R R
* 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0
* 0
@@ -168,18 +117,44 @@ struct adp5589_gpi_map {
* 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0
*/
-#define ADP_ROW(x) (1 << (x))
-#define ADP_COL(x) (1 << (x + 8))
+#define ADP_ROW(x) (1 << (x))
+#define ADP_COL(x) (1 << (x + 8))
+#define ADP5589_ROW_MASK 0xFF
+#define ADP5589_COL_MASK 0xFF
+#define ADP5589_COL_SHIFT 8
+#define ADP5589_MAX_ROW_NUM 7
+#define ADP5589_MAX_COL_NUM 10
+
+/* ADP5585 Mask Bits:
+ * C C C C C | R R R R R R
+ * 4 3 2 1 0 | 5 4 3 2 1 0
+ *
+ * ---- BIT -- -----------
+ * 1 0 0 0 0 | 0 0 0 0 0 0
+ * 0 9 8 7 6 | 5 4 3 2 1 0
+ */
+
+#define ADP5585_ROW_MASK 0x3F
+#define ADP5585_COL_MASK 0x1F
+#define ADP5585_ROW_SHIFT 0
+#define ADP5585_COL_SHIFT 6
+#define ADP5585_MAX_ROW_NUM 5
+#define ADP5585_MAX_COL_NUM 4
+
+#define ADP5585_ROW(x) (1 << ((x) & ADP5585_ROW_MASK))
+#define ADP5585_COL(x) (1 << (((x) & ADP5585_COL_MASK) + ADP5585_COL_SHIFT))
+
+/* Put one of these structures in i2c_board_info platform_data */
struct adp5589_kpad_platform_data {
unsigned keypad_en_mask; /* Keypad (Rows/Columns) enable mask */
const unsigned short *keymap; /* Pointer to keymap */
unsigned short keymapsize; /* Keymap size */
bool repeat; /* Enable key repeat */
- bool en_keylock; /* Enable key lock feature */
- unsigned char unlock_key1; /* Unlock Key 1 */
- unsigned char unlock_key2; /* Unlock Key 2 */
- unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable */
+ bool en_keylock; /* Enable key lock feature (ADP5589 only)*/
+ unsigned char unlock_key1; /* Unlock Key 1 (ADP5589 only) */
+ unsigned char unlock_key2; /* Unlock Key 2 (ADP5589 only) */
+ unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable (ADP5589 only) */
unsigned char scan_cycle_time; /* Time between consecutive scan cycles */
unsigned char reset_cfg; /* Reset config */
unsigned short reset1_key_1; /* Reset Key 1 */
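
A hedged platform-data sketch for the newly supported ADP5585 variant (not from the patch itself); it would normally be attached through i2c_board_info.platform_data. Key codes are placeholders, and the keymap index layout follows the driver's scan ordering.

#include <linux/input.h>
#include <linux/input/adp5589.h>

static const unsigned short my_adp5585_keymap[ADP5585_KEYMAPSIZE] = {
	[0] = KEY_A,
	[1] = KEY_B,
	[2] = KEY_ENTER,
};

static const struct adp5589_kpad_platform_data my_adp5585_pdata = {
	/* Rows 0-1 and columns 0-1 take part in the key matrix. */
	.keypad_en_mask	= ADP5585_ROW(0) | ADP5585_ROW(1) |
			  ADP5585_COL(0) | ADP5585_COL(1),
	.keymap		= my_adp5585_keymap,
	.keymapsize	= ARRAY_SIZE(my_adp5585_keymap),
	.repeat		= false,
};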
diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h
index df00d99..57e01a7 100644
--- a/include/linux/input/adxl34x.h
+++ b/include/linux/input/adxl34x.h
@@ -30,8 +30,9 @@ struct adxl34x_platform_data {
* Y, or Z participation in Tap detection. A '0' excludes the
* selected axis from participation in Tap detection.
* Setting the SUPPRESS bit suppresses Double Tap detection if
- * acceleration greater than tap_threshold is present between
- * taps.
+ * acceleration greater than tap_threshold is present during the
+ * tap_latency period, i.e. after the first tap but before the
+ * opening of the second tap window.
*/
#define ADXL_SUPPRESS (1 << 3)
@@ -226,13 +227,13 @@ struct adxl34x_platform_data {
* detection will begin and prevent the detection of activity. This
* bit serially links the activity and inactivity functions. When '0'
* the inactivity and activity functions are concurrent. Additional
- * information can be found in the Application section under Link
- * Mode.
+ * information can be found in the ADXL34x datasheet's Application
+ * section under Link Mode.
* AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode
* when inactivity (acceleration has been below inactivity_threshold
* for at least inactivity_time) is detected and the LINK bit is set.
- * A '0' disables automatic switching to Sleep Mode. See SLEEP
- * for further description.
+ * A '0' disables automatic switching to Sleep Mode. See the
+ * Sleep Bit section of the ADXL34x datasheet for more information.
*/
#define ADXL_LINK (1 << 5)
@@ -266,6 +267,12 @@ struct adxl34x_platform_data {
u8 watermark;
+ /*
+ * When acceleration measurements are received from the ADXL34x
+ * events are sent to the event subsystem. The following settings
+ * select the event type and event code for new x, y and z axis data
+ * respectively.
+ */
u32 ev_type; /* EV_ABS or EV_REL */
u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */
@@ -289,7 +296,7 @@ struct adxl34x_platform_data {
u32 ev_code_act_inactivity; /* EV_KEY */
/*
- * Use ADXL34x INT2 instead of INT1
+ * Use ADXL34x INT2 pin instead of INT1 pin for interrupt output
*/
u8 use_int2;
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
new file mode 100644
index 0000000..a0c4125
--- /dev/null
+++ b/include/linux/integrity.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_INTEGRITY_H
+#define _LINUX_INTEGRITY_H
+
+#include <linux/fs.h>
+
+enum integrity_status {
+ INTEGRITY_PASS = 0,
+ INTEGRITY_FAIL,
+ INTEGRITY_NOLABEL,
+ INTEGRITY_NOXATTRS,
+ INTEGRITY_UNKNOWN,
+};
+
+/* List of EVM protected security xattrs */
+#ifdef CONFIG_INTEGRITY
+extern int integrity_inode_alloc(struct inode *inode);
+extern void integrity_inode_free(struct inode *inode);
+
+#else
+static inline int integrity_inode_alloc(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void integrity_inode_free(struct inode *inode)
+{
+ return;
+}
+#endif /* CONFIG_INTEGRITY */
+#endif /* _LINUX_INTEGRITY_H */
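
A rough sketch of the intended pairing (not from the patch itself): the allocate/free hooks bracket an inode's lifetime, roughly as the security/VFS glue is expected to use them. my_setup_inode() and my_teardown_inode() are hypothetical.

#include <linux/fs.h>
#include <linux/integrity.h>

static int my_setup_inode(struct inode *inode)
{
	int rc = integrity_inode_alloc(inode);

	if (rc)
		return rc;
	/* ... further per-inode setup ... */
	return 0;
}

static void my_teardown_inode(struct inode *inode)
{
	integrity_inode_free(inode);
}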
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 9310c69..e6ca56d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -271,7 +271,7 @@ struct qi_desc {
};
struct q_inval {
- spinlock_t q_lock;
+ raw_spinlock_t q_lock;
struct qi_desc *desc; /* invalidation queue */
int *desc_status; /* desc status */
int free_head; /* first free entry */
@@ -279,7 +279,7 @@ struct q_inval {
int free_cnt;
};
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
@@ -311,14 +311,14 @@ struct intel_iommu {
u64 cap;
u64 ecap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
- spinlock_t register_lock; /* protect register handling */
+ raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
struct dmar_domain **domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
@@ -329,7 +329,7 @@ struct intel_iommu {
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
-#ifdef CONFIG_INTR_REMAP
+#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
#endif
int node;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a103732..a64b00e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,8 @@
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ * resume time.
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -72,6 +74,7 @@
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
+#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
@@ -95,6 +98,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @flags: flags (see IRQF_* above)
* @name: name of the device
* @dev_id: cookie to identify the device
+ * @percpu_dev_id: per-CPU cookie to identify the device
* @next: pointer to the next irqaction for shared interrupts
* @irq: interrupt number
* @dir: pointer to the proc/irq/NN/name entry
@@ -104,17 +108,18 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @thread_mask: bitmask for keeping track of @thread activity
*/
struct irqaction {
- irq_handler_t handler;
- unsigned long flags;
- void *dev_id;
- struct irqaction *next;
- int irq;
- irq_handler_t thread_fn;
- struct task_struct *thread;
- unsigned long thread_flags;
- unsigned long thread_mask;
- const char *name;
- struct proc_dir_entry *dir;
+ irq_handler_t handler;
+ unsigned long flags;
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ struct irqaction *next;
+ int irq;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned long thread_flags;
+ unsigned long thread_mask;
+ const char *name;
+ struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -136,6 +141,10 @@ extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev_id);
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *percpu_dev_id);
+
extern void exit_irq_thread(void);
#else
@@ -164,10 +173,18 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
return request_irq(irq, handler, flags, name, dev_id);
}
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *percpu_dev_id)
+{
+ return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
static inline void exit_irq_thread(void) { }
#endif
extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;
@@ -207,7 +224,9 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq, unsigned int type);
/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
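
A hedged sketch of the new per-CPU IRQ request path (not from the patch itself), roughly how a local-timer style user might employ it: the interrupt is requested once with a per-CPU dev_id, then enabled on each CPU that should receive it. The IRQ number and my_* symbols are placeholders.

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct my_percpu_evt {
	unsigned long count;
};

static DEFINE_PER_CPU(struct my_percpu_evt, my_evt);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	struct my_percpu_evt *evt = dev_id;	/* this CPU's instance */

	evt->count++;
	return IRQ_HANDLED;
}

static int my_setup_percpu_irq(unsigned int irq)
{
	int err;

	/* Requested once, for all CPUs. */
	err = request_percpu_irq(irq, my_percpu_handler,
				 "my-local-timer", &my_evt);
	if (err)
		return err;

	/* Then enabled from each CPU that wants to receive it. */
	enable_percpu_irq(irq, 0);
	return 0;
}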
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 8cdcc2a1..e44e84f 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -27,7 +27,7 @@
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io_mapping.txt
+ * See Documentation/io-mapping.txt
*/
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
@@ -117,6 +117,8 @@ io_mapping_unmap(void __iomem *vaddr)
#else
+#include <linux/uaccess.h>
+
/* this struct isn't actually defined anywhere */
struct io_mapping;
@@ -138,12 +140,14 @@ static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
+ pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
+ pagefault_enable();
}
/* Non-atomic map/unmap */
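
An illustrative atomic-mapping use (not from the patch itself): with the pagefault_disable()/pagefault_enable() pair added above, the fallback path now matches the HAVE_ATOMIC_IOMAP one in forbidding sleeping between map and unmap. my_write_dword() is hypothetical.

#include <linux/io.h>
#include <linux/io-mapping.h>

static void my_write_dword(struct io_mapping *mapping,
			   unsigned long offset, u32 val)
{
	void __iomem *vaddr = io_mapping_map_atomic_wc(mapping, offset);

	iowrite32(val, vaddr);
	io_mapping_unmap_atomic(vaddr);
}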
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 5951730..59e49c8 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -66,6 +66,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
* IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
* IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
* IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
*/
enum {
IRQ_TYPE_NONE = 0x00000000,
@@ -88,12 +89,13 @@ enum {
IRQ_MOVE_PCNTXT = (1 << 14),
IRQ_NESTED_THREAD = (1 << 15),
IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
@@ -336,12 +338,14 @@ struct irq_chip {
* IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
* IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
* when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake() for this irq chip
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
IRQCHIP_EOI_IF_HANDLED = (1 << 1),
IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
};
/* This include will go away once we isolated irq_desc usage to core code */
@@ -365,6 +369,8 @@ enum {
struct irqaction;
extern int setup_irq(unsigned int irq, struct irqaction *new);
extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
extern void irq_cpu_online(void);
extern void irq_cpu_offline(void);
@@ -392,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
extern void handle_nested_irq(unsigned int irq);
@@ -420,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
irq_set_chip_and_handler_name(irq, chip, handle, NULL);
}
+extern int irq_set_percpu_devid(unsigned int irq);
+
extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name);
@@ -481,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
}
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+ irq_set_status_flags(irq,
+ IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+ IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
/* Handle dynamic irq creation and destruction */
extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
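On the setup side, interrupt-controller code marks a per-CPU-dev_id interrupt and installs the new flow handler; a hedged sketch of what a platform irqchip (the GIC PPIs are the obvious user) might do, with the chip pointer assumed to exist:

    static void my_setup_ppi(unsigned int irq, struct irq_chip *chip)
    {
            /* allocates the per-CPU enable mask and applies the status flags
             * listed in irq_set_percpu_devid_flags() above */
            irq_set_percpu_devid(irq);
            irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
    }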
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 4fa09d4..6a9e8f5 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -1,20 +1,23 @@
#ifndef _LINUX_IRQ_WORK_H
#define _LINUX_IRQ_WORK_H
+#include <linux/llist.h>
+
struct irq_work {
- struct irq_work *next;
+ unsigned long flags;
+ struct llist_node llnode;
void (*func)(struct irq_work *);
};
static inline
-void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
+void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- entry->next = NULL;
- entry->func = func;
+ work->flags = 0;
+ work->func = func;
}
-bool irq_work_queue(struct irq_work *entry);
+bool irq_work_queue(struct irq_work *work);
void irq_work_run(void);
-void irq_work_sync(struct irq_work *entry);
+void irq_work_sync(struct irq_work *work);
#endif /* _LINUX_IRQ_WORK_H */
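struct irq_work is now queued on a lock-less list and carries its own flags word, but the calling convention is unchanged. A minimal sketch with a hypothetical callback:

    #include <linux/irq_work.h>

    static void my_work_func(struct irq_work *work)
    {
            /* runs later from the irq_work interrupt, i.e. normal IRQ context */
    }

    static struct irq_work my_work;

    static void my_init(void)
    {
            init_irq_work(&my_work, my_work_func);
    }

    static void my_nmi_path(void)
    {
            /* safe here because queueing is a cmpxchg on the llist head */
            irq_work_queue(&my_work);
    }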
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 150134a..6b69c2c 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -53,6 +53,7 @@ struct irq_desc {
unsigned long last_unhandled; /* Aging timer for unhandled count */
unsigned int irqs_unhandled;
raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
struct irq_affinity_notify *affinity_notify;
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index 44cd663..4ccf95d 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -68,7 +68,7 @@
#define ISDN_NET_ENCAP_SYNCPPP 4
#define ISDN_NET_ENCAP_UIHDLC 5
#define ISDN_NET_ENCAP_CISCOHDLCK 6 /* With SLARP and keepalive */
-#define ISDN_NET_ENCAP_X25IFACE 7 /* Documentation/networking/x25-iface.txt*/
+#define ISDN_NET_ENCAP_X25IFACE 7 /* Documentation/networking/x25-iface.txt */
#define ISDN_NET_ENCAP_MAX_ENCAP ISDN_NET_ENCAP_X25IFACE
/* Facility which currently uses an ISDN-channel */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index f97672a..265e2c3 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -303,7 +303,7 @@ extern void jiffies_to_timespec(const unsigned long jiffies,
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
-extern clock_t jiffies_to_clock_t(long x);
+extern clock_t jiffies_to_clock_t(unsigned long x);
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 46ac9a5..8eefcf7 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -382,7 +382,7 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
}
extern int hex_to_bin(char ch);
-extern void hex2bin(u8 *dst, const char *src, size_t count);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
/*
* General tracing related utility functions - trace_printk(),
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index dd7c12e..dce6e4d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -181,7 +181,7 @@ struct kretprobe {
int nmissed;
size_t data_size;
struct hlist_head free_instances;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
struct kretprobe_instance {
diff --git a/include/linux/llist.h b/include/linux/llist.h
index aa0c8b5..7287734 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -35,10 +35,30 @@
*
* The basic atomic operation of this list is cmpxchg on long. On
* architectures that don't have NMI-safe cmpxchg implementation, the
- * list can NOT be used in NMI handler. So code uses the list in NMI
- * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ * list can NOT be used in NMI handlers. So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/kernel.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
struct llist_head {
struct llist_node *first;
};
@@ -113,14 +133,55 @@ static inline void init_llist_head(struct llist_head *list)
* test whether the list is empty without deleting something from the
* list.
*/
-static inline int llist_empty(const struct llist_head *head)
+static inline bool llist_empty(const struct llist_head *head)
{
return ACCESS_ONCE(head->first) == NULL;
}
-void llist_add(struct llist_node *new, struct llist_head *head);
-void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
- struct llist_head *head);
-struct llist_node *llist_del_first(struct llist_head *head);
-struct llist_node *llist_del_all(struct llist_head *head);
+static inline struct llist_node *llist_next(struct llist_node *node)
+{
+ return node->next;
+}
+
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ *
+ * Returns whether the list was empty before adding.
+ */
+static inline bool llist_add(struct llist_node *new, struct llist_head *head)
+{
+ struct llist_node *entry, *old_entry;
+
+ entry = head->first;
+ for (;;) {
+ old_entry = entry;
+ new->next = entry;
+ entry = cmpxchg(&head->first, old_entry, new);
+ if (entry == old_entry)
+ break;
+ }
+
+ return old_entry == NULL;
+}
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If the list is empty, return NULL; otherwise delete all entries and
+ * return a pointer to the first entry. Entries are returned from the
+ * most recently added to the oldest (LIFO order).
+ */
+static inline struct llist_node *llist_del_all(struct llist_head *head)
+{
+ return xchg(&head->first, NULL);
+}
+
+extern bool llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head);
+extern struct llist_node *llist_del_first(struct llist_head *head);
+
#endif /* LLIST_H */
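A typical producer/consumer pattern with the now-inline primitives; the element type is caller-defined, and llist_del_all() hands back entries newest-first:

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct my_event {
            int data;
            struct llist_node node;
    };

    static LLIST_HEAD(my_events);

    /* producer: usable from IRQ context (and NMI, given NMI-safe cmpxchg) */
    static void my_report(int data)
    {
            struct my_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

            if (!ev)
                    return;
            ev->data = data;
            llist_add(&ev->node, &my_events);
    }

    /* consumer: detaches the whole list with one xchg, then walks it */
    static void my_drain(void)
    {
            struct llist_node *n = llist_del_all(&my_events);

            while (n) {
                    struct my_event *ev = container_of(n, struct my_event, node);

                    n = llist_next(n);
                    /* ... process ev->data ... */
                    kfree(ev);
            }
    }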
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index ef820a3c..b6a56e3 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -548,7 +548,7 @@ do { \
#endif
#ifdef CONFIG_PROVE_RCU
-extern void lockdep_rcu_dereference(const char *file, const int line);
+void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif
#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 8dda8de..ed8fe0d 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -18,6 +18,7 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/regmap.h>
/*
* Register values.
@@ -361,12 +362,8 @@ struct wm831x {
struct mutex io_lock;
struct device *dev;
- int (*read_dev)(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *dest);
- int (*write_dev)(struct wm831x *wm831x, unsigned short reg,
- int bytes, void *src);
- void *control_data;
+ struct regmap *regmap;
int irq; /* Our chip IRQ */
struct mutex irq_lock;
@@ -374,6 +371,8 @@ struct wm831x {
int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+ bool soft_shutdown;
+
/* Chip revision based flags */
unsigned has_gpio_ena:1; /* Has GPIO enable bit */
unsigned has_cs_sts:1; /* Has current sink status bit */
@@ -412,8 +411,11 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq);
void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
+void wm831x_device_shutdown(struct wm831x *wm831x);
int wm831x_irq_init(struct wm831x *wm831x, int irq);
void wm831x_irq_exit(struct wm831x *wm831x);
void wm831x_auxadc_init(struct wm831x *wm831x);
+extern struct regmap_config wm831x_regmap_config;
+
#endif
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 0ba2459..1d7a3f7 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -123,6 +123,9 @@ struct wm831x_pdata {
/** Disable the touchscreen */
bool disable_touch;
+ /** The driver should initiate a power off sequence during shutdown */
+ bool soft_shutdown;
+
int irq_base;
int gpio_base;
int gpio_defaults[WM831X_GPIO_NUM];
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 2aab4e9..0147b69 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -25,16 +25,15 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
+struct regmap;
+
#define WM8400_REGISTER_COUNT 0x55
struct wm8400 {
struct device *dev;
- int (*read_dev)(void *data, char reg, int count, u16 *dst);
- int (*write_dev)(void *data, char reg, int count, const u16 *src);
-
struct mutex io_lock;
- void *io_data;
+ struct regmap *regmap;
u16 reg_cache[WM8400_REGISTER_COUNT];
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index f0b69cd..45df450 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -24,6 +24,7 @@ enum wm8994_type {
struct regulator_dev;
struct regulator_bulk_data;
+struct regmap;
#define WM8994_NUM_GPIO_REGS 11
#define WM8994_NUM_LDO_REGS 2
@@ -50,18 +51,12 @@ struct regulator_bulk_data;
#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
struct wm8994 {
- struct mutex io_lock;
struct mutex irq_lock;
enum wm8994_type type;
struct device *dev;
- int (*read_dev)(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *dest);
- int (*write_dev)(struct wm8994 *wm8994, unsigned short reg,
- int bytes, const void *src);
-
- void *control_data;
+ struct regmap *regmap;
int gpio_base;
int irq_base;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 53ef894..2366f94 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -75,6 +75,7 @@ enum {
MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21,
MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
+ MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34,
MLX4_DEV_CAP_FLAG_WOL = 1LL << 38,
MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 774b895..c93d00a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -79,9 +79,21 @@ struct page {
};
/* Third double word block */
- struct list_head lru; /* Pageout list, eg. active_list
+ union {
+ struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
*/
+ struct { /* slub per cpu partial pages */
+ struct page *next; /* Next partial slab */
+#ifdef CONFIG_64BIT
+ int pages; /* Nr of partial slabs left */
+ int pobjects; /* Approximate # of objects */
+#else
+ short int pages;
+ short int pobjects;
+#endif
+ };
+ };
/* Remainder is not double word aligned */
union {
@@ -135,6 +147,17 @@ struct page {
#endif
;
+struct page_frag {
+ struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 offset;
+ __u32 size;
+#else
+ __u16 offset;
+ __u16 size;
+#endif
+};
+
typedef unsigned long __nocast vm_flags_t;
/*
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ae28e93..468819c 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -405,6 +405,15 @@ struct virtio_device_id {
};
#define VIRTIO_DEV_ANY_ID 0xffffffff
+/*
+ * For Hyper-V devices we use the device guid as the id.
+ */
+struct hv_vmbus_device_id {
+ __u8 guid[16];
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
/* i2c */
#define I2C_NAME_SIZE 20
diff --git a/include/linux/module.h b/include/linux/module.h
index 1c30087..8639216 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -580,9 +580,6 @@ int unregister_module_notifier(struct notifier_block * nb);
extern void print_modules(void);
-extern void module_update_tracepoints(void);
-extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
-
#else /* !CONFIG_MODULES... */
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
@@ -698,15 +695,6 @@ static inline int unregister_module_notifier(struct notifier_block * nb)
static inline void print_modules(void)
{
}
-
-static inline void module_update_tracepoints(void)
-{
-}
-
-static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
-{
- return 0;
-}
#endif /* CONFIG_MODULES */
#ifdef CONFIG_SYSFS
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ddaae98..fffb10b 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -262,6 +262,26 @@ static inline void __kernel_param_unlock(void)
.str = &__param_string_##name, 0, perm); \
__MODULE_PARM_TYPE(name, "string")
+/**
+ * parameq - checks if two parameter names match
+ * @name1: parameter name 1
+ * @name2: parameter name 2
+ *
+ * Returns true if the two parameter names are equal.
+ * Dashes (-) are considered equal to underscores (_).
+ */
+extern bool parameq(const char *name1, const char *name2);
+
+/**
+ * parameqn - checks if two parameter names match
+ * @name1: parameter name 1
+ * @name2: parameter name 2
+ * @n: the length to compare
+ *
+ * Similar to parameq(), except it compares @n characters.
+ */
+extern bool parameqn(const char *name1, const char *name2, size_t n);
+
/* Called on module insert or kernel boot */
extern int parse_args(const char *name,
char *args,
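parameq()/parameqn() exist so that dashes and underscores in parameter names compare equal, matching what users may type on the command line. Two illustrative checks inside kernel code (the names are made up):

    if (parameq("my-option", "my_option"))
            pr_info("treated as the same parameter\n");

    /* compare only the name part in front of '=' (9 characters here) */
    if (parameqn("my-option=1", "my_option", 9))
            pr_info("prefix matches as well\n");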
diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h
index 3df0984..ae5df12 100644
--- a/include/linux/net_tstamp.h
+++ b/include/linux/net_tstamp.h
@@ -45,7 +45,7 @@ struct hwtstamp_config {
};
/* possible values for hwtstamp_config->tx_type */
-enum {
+enum hwtstamp_tx_types {
/*
* No outgoing packet will need hardware time stamping;
* should a packet arrive which asks for it, no hardware
@@ -72,7 +72,7 @@ enum {
};
/* possible values for hwtstamp_config->rx_filter */
-enum {
+enum hwtstamp_rx_filters {
/* time stamp no incoming packet at all */
HWTSTAMP_FILTER_NONE,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0db1f5f..df1c836 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -31,7 +31,7 @@
#include <linux/if_link.h>
#ifdef __KERNEL__
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/atomic.h>
@@ -969,7 +969,7 @@ struct net_device {
*/
char name[IFNAMSIZ];
- struct pm_qos_request_list pm_qos_req;
+ struct pm_qos_request pm_qos_req;
/* device name hash chain */
struct hlist_node name_hlist;
@@ -2619,6 +2619,9 @@ static inline const char *netdev_name(const struct net_device *dev)
return dev->name;
}
+extern int __netdev_printk(const char *level, const struct net_device *dev,
+ struct va_format *vaf);
+
extern int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
__attribute__ ((format (printf, 3, 4)));
@@ -2646,8 +2649,7 @@ extern int netdev_info(const struct net_device *dev, const char *format, ...)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
- dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
- netdev_name(__dev), ##args); \
+ dynamic_netdev_dbg(__dev, format, ##args); \
} while (0)
#else
#define netdev_dbg(__dev, format, args...) \
@@ -2714,9 +2716,7 @@ do { \
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
- dynamic_dev_dbg((netdev)->dev.parent, \
- "%s: " format, \
- netdev_name(netdev), ##args); \
+ dynamic_netdev_dbg(netdev, format, ##args); \
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...) \
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 76f99e8..32345c2 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -373,6 +373,22 @@ enum nfsstat4 {
NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
};
+static inline bool seqid_mutating_err(u32 err)
+{
+ /* rfc 3530 section 8.1.5: */
+ switch (err) {
+ case NFS4ERR_STALE_CLIENTID:
+ case NFS4ERR_STALE_STATEID:
+ case NFS4ERR_BAD_STATEID:
+ case NFS4ERR_BAD_SEQID:
+ case NFS4ERR_BADXDR:
+ case NFS4ERR_RESOURCE:
+ case NFS4ERR_NOFILEHANDLE:
+ return false;
+ };
+ return true;
+}
+
/*
* Note: NF4BAD is not actually part of the protocol; it is just used
* internally by nfsd.
@@ -394,7 +410,10 @@ enum open_claim_type4 {
NFS4_OPEN_CLAIM_NULL = 0,
NFS4_OPEN_CLAIM_PREVIOUS = 1,
NFS4_OPEN_CLAIM_DELEGATE_CUR = 2,
- NFS4_OPEN_CLAIM_DELEGATE_PREV = 3
+ NFS4_OPEN_CLAIM_DELEGATE_PREV = 3,
+ NFS4_OPEN_CLAIM_FH = 4, /* 4.1 */
+ NFS4_OPEN_CLAIM_DELEG_CUR_FH = 5, /* 4.1 */
+ NFS4_OPEN_CLAIM_DELEG_PREV_FH = 6, /* 4.1 */
};
enum opentype4 {
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index eaac770..60a137b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -149,7 +149,6 @@ struct nfs_inode {
unsigned long read_cache_jiffies;
unsigned long attrtimeo;
unsigned long attrtimeo_timestamp;
- __u64 change_attr; /* v4 only */
unsigned long attr_gencount;
/* "Generation counter" for the attribute cache. This is
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index e2791a2..ab465fe 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -34,6 +34,7 @@ enum {
PG_NEED_COMMIT,
PG_NEED_RESCHED,
PG_PNFS_COMMIT,
+ PG_PARTIAL_READ_FAILED,
};
struct nfs_inode;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index abd615d..c74595b 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1133,7 +1133,6 @@ struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
struct nfs_read_data {
- int flags;
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@@ -1156,7 +1155,6 @@ struct nfs_read_data {
};
struct nfs_write_data {
- int flags;
struct rpc_task task;
struct inode *inode;
struct rpc_cred *cred;
@@ -1197,9 +1195,6 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
- int (*lookupfh)(struct nfs_server *, struct nfs_fh *,
- struct qstr *, struct nfs_fh *,
- struct nfs_fattr *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
diff --git a/include/linux/nfsd/Kbuild b/include/linux/nfsd/Kbuild
index 55d1467..b8d4001 100644
--- a/include/linux/nfsd/Kbuild
+++ b/include/linux/nfsd/Kbuild
@@ -1,6 +1,4 @@
-header-y += const.h
header-y += debug.h
header-y += export.h
header-y += nfsfh.h
header-y += stats.h
-header-y += syscall.h
diff --git a/include/linux/nfsd/const.h b/include/linux/nfsd/const.h
deleted file mode 100644
index 323f8cf..0000000
--- a/include/linux/nfsd/const.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * include/linux/nfsd/const.h
- *
- * Various constants related to NFS.
- *
- * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef _LINUX_NFSD_CONST_H
-#define _LINUX_NFSD_CONST_H
-
-#include <linux/nfs.h>
-#include <linux/nfs2.h>
-#include <linux/nfs3.h>
-#include <linux/nfs4.h>
-
-/*
- * Maximum protocol version supported by knfsd
- */
-#define NFSSVC_MAXVERS 3
-
-/*
- * Maximum blocksizes supported by daemon under various circumstances.
- */
-#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
-/* NFSv2 is limited by the protocol specification, see RFC 1094 */
-#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
-
-#ifdef __KERNEL__
-
-#include <linux/sunrpc/msg_prot.h>
-
-/*
- * Largest number of bytes we need to allocate for an NFS
- * call or reply. Used to control buffer sizes. We use
- * the length of v3 WRITE, READDIR and READDIR replies
- * which are an RPC header, up to 26 XDR units of reply
- * data, and some page data.
- *
- * Note that accuracy here doesn't matter too much as the
- * size is rounded up to a page size when allocating space.
- */
-#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
-
-#ifdef CONFIG_NFSD_V4
-# define NFSSVC_XDRSIZE NFS4_SVC_XDRSIZE
-#elif defined(CONFIG_NFSD_V3)
-# define NFSSVC_XDRSIZE NFS3_SVC_XDRSIZE
-#else
-# define NFSSVC_XDRSIZE NFS2_SVC_XDRSIZE
-#endif
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_NFSD_CONST_H */
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 8a31a20..f85308e 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -96,7 +96,6 @@ struct svc_export {
struct auth_domain * ex_client;
int ex_flags;
struct path ex_path;
- char *ex_pathname;
uid_t ex_anon_uid;
gid_t ex_anon_gid;
int ex_fsid;
@@ -137,6 +136,7 @@ struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
struct path *);
+struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
int exp_rootfh(struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index f76d80c..ce4743a 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -14,11 +14,14 @@
#ifndef _LINUX_NFSD_FH_H
#define _LINUX_NFSD_FH_H
-# include <linux/types.h>
+#include <linux/types.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
#ifdef __KERNEL__
# include <linux/sunrpc/svc.h>
#endif
-#include <linux/nfsd/const.h>
/*
* This is the old "dentry style" Linux NFSv2 file handle.
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h
deleted file mode 100644
index 812bc1e..0000000
--- a/include/linux/nfsd/syscall.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * include/linux/nfsd/syscall.h
- *
- * This file holds all declarations for the knfsd syscall interface.
- *
- * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
- */
-
-#ifndef NFSD_SYSCALL_H
-#define NFSD_SYSCALL_H
-
-#include <linux/types.h>
-#include <linux/nfsd/export.h>
-
-/*
- * Version of the syscall interface
- */
-#define NFSCTL_VERSION 0x0201
-
-/*
- * These are the commands understood by nfsctl().
- */
-#define NFSCTL_SVC 0 /* This is a server process. */
-#define NFSCTL_ADDCLIENT 1 /* Add an NFS client. */
-#define NFSCTL_DELCLIENT 2 /* Remove an NFS client. */
-#define NFSCTL_EXPORT 3 /* export a file system. */
-#define NFSCTL_UNEXPORT 4 /* unexport a file system. */
-/*#define NFSCTL_UGIDUPDATE 5 / * update a client's uid/gid map. DISCARDED */
-/*#define NFSCTL_GETFH 6 / * get an fh by ino DISCARDED */
-#define NFSCTL_GETFD 7 /* get an fh by path (used by mountd) */
-#define NFSCTL_GETFS 8 /* get an fh by path with max FH len */
-
-/* SVC */
-struct nfsctl_svc {
- unsigned short svc_port;
- int svc_nthreads;
-};
-
-/* ADDCLIENT/DELCLIENT */
-struct nfsctl_client {
- char cl_ident[NFSCLNT_IDMAX+1];
- int cl_naddr;
- struct in_addr cl_addrlist[NFSCLNT_ADDRMAX];
- int cl_fhkeytype;
- int cl_fhkeylen;
- unsigned char cl_fhkey[NFSCLNT_KEYMAX];
-};
-
-/* EXPORT/UNEXPORT */
-struct nfsctl_export {
- char ex_client[NFSCLNT_IDMAX+1];
- char ex_path[NFS_MAXPATHLEN+1];
- __kernel_old_dev_t ex_dev;
- __kernel_ino_t ex_ino;
- int ex_flags;
- __kernel_uid_t ex_anon_uid;
- __kernel_gid_t ex_anon_gid;
-};
-
-/* GETFD */
-struct nfsctl_fdparm {
- struct sockaddr gd_addr;
- char gd_path[NFS_MAXPATHLEN+1];
- int gd_version;
-};
-
-/* GETFS - GET Filehandle with Size */
-struct nfsctl_fsparm {
- struct sockaddr gd_addr;
- char gd_path[NFS_MAXPATHLEN+1];
- int gd_maxlen;
-};
-
-/*
- * This is the argument union.
- */
-struct nfsctl_arg {
- int ca_version; /* safeguard */
- union {
- struct nfsctl_svc u_svc;
- struct nfsctl_client u_client;
- struct nfsctl_export u_export;
- struct nfsctl_fdparm u_getfd;
- struct nfsctl_fsparm u_getfs;
- /*
- * The following dummy member is needed to preserve binary compatibility
- * on platforms where alignof(void*)>alignof(int). It's needed because
- * this union used to contain a member (u_umap) which contained a
- * pointer.
- */
- void *u_ptr;
- } u;
-#define ca_svc u.u_svc
-#define ca_client u.u_client
-#define ca_export u.u_export
-#define ca_getfd u.u_getfd
-#define ca_getfs u.u_getfs
-};
-
-union nfsctl_res {
- __u8 cr_getfh[NFS_FHSIZE];
- struct knfsd_fh cr_getfs;
-};
-
-#ifdef __KERNEL__
-/*
- * Kernel syscall implementation.
- */
-extern int exp_addclient(struct nfsctl_client *ncp);
-extern int exp_delclient(struct nfsctl_client *ncp);
-extern int exp_export(struct nfsctl_export *nxp);
-extern int exp_unexport(struct nfsctl_export *nxp);
-
-#endif /* __KERNEL__ */
-
-#endif /* NFSD_SYSCALL_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 9180dc5..92c40a1 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -68,6 +68,7 @@ struct device_node {
/* Pointer for first entry in chain of all nodes. */
extern struct device_node *allnodes;
extern struct device_node *of_chosen;
+extern struct device_node *of_aliases;
extern rwlock_t devtree_lock;
static inline bool of_have_populated_dt(void)
@@ -199,6 +200,8 @@ extern int of_property_read_u32_array(const struct device_node *np,
const char *propname,
u32 *out_values,
size_t sz);
+extern int of_property_read_u64(const struct device_node *np,
+ const char *propname, u64 *out_value);
extern int of_property_read_string(struct device_node *np,
const char *propname,
@@ -209,6 +212,9 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
+#define for_each_property(pp, properties) \
+ for (pp = properties; pp != NULL; pp = pp->next)
+
extern int of_n_addr_cells(struct device_node *np);
extern int of_n_size_cells(struct device_node *np);
extern const struct of_device_id *of_match_node(
@@ -221,6 +227,9 @@ extern int of_parse_phandles_with_args(struct device_node *np,
const char *list_name, const char *cells_name, int index,
struct device_node **out_node, const void **out_args);
+extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
+extern int of_alias_get_id(struct device_node *np, const char *stem);
+
extern int of_machine_is_compatible(const char *compat);
extern int prom_add_property(struct device_node* np, struct property* prop);
@@ -235,6 +244,7 @@ extern void of_attach_node(struct device_node *);
extern void of_detach_node(struct device_node *);
#endif
+#define of_match_ptr(_ptr) (_ptr)
#else /* CONFIG_OF */
static inline bool of_have_populated_dt(void)
@@ -242,6 +252,22 @@ static inline bool of_have_populated_dt(void)
return false;
}
+#define for_each_child_of_node(parent, child) \
+ while (0)
+
+static inline int of_device_is_compatible(const struct device_node *device,
+ const char *name)
+{
+ return 0;
+}
+
+static inline struct property *of_find_property(const struct device_node *np,
+ const char *name,
+ int *lenp)
+{
+ return NULL;
+}
+
static inline int of_property_read_u32_array(const struct device_node *np,
const char *propname,
u32 *out_values, size_t sz)
@@ -263,6 +289,21 @@ static inline const void *of_get_property(const struct device_node *node,
return NULL;
}
+static inline int of_property_read_u64(const struct device_node *np,
+ const char *propname, u64 *out_value)
+{
+ return -ENOSYS;
+}
+
+static inline struct device_node *of_parse_phandle(struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ return NULL;
+}
+
+#define of_match_ptr(_ptr) NULL
+#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
static inline int of_property_read_u32(const struct device_node *np,
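The new 64-bit accessor follows the same convention as the existing u32 helpers: 0 on success, a negative errno otherwise. A driver-side sketch with a hypothetical property name; of_alias_get_id() similarly returns the alias index for a stem such as "serial", or a negative error:

    #include <linux/of.h>

    static int my_read_base(struct device_node *np, u64 *base)
    {
            int ret = of_property_read_u64(np, "my,reg-base", base);

            if (ret)
                    return ret;     /* property missing or too short */
            return 0;
    }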
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 7020e97..87a9208 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -16,9 +16,14 @@
#include <linux/err.h>
#include <linux/cpufreq.h>
+#include <linux/notifier.h>
struct opp;
+enum opp_event {
+ OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
#if defined(CONFIG_PM_OPP)
unsigned long opp_get_voltage(struct opp *opp);
@@ -40,6 +45,8 @@ int opp_enable(struct device *dev, unsigned long freq);
int opp_disable(struct device *dev, unsigned long freq);
+struct srcu_notifier_head *opp_get_notifier(struct device *dev);
+
#else
static inline unsigned long opp_get_voltage(struct opp *opp)
{
@@ -89,6 +96,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
{
return 0;
}
+
+static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
#endif /* CONFIG_PM */
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
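opp_get_notifier() exposes an SRCU notifier head so that interested code (devfreq is the intended consumer) can react to OPPs being added, enabled or disabled. A hedged registration sketch; the callback and the device pointer are assumptions:

    #include <linux/opp.h>
    #include <linux/notifier.h>
    #include <linux/err.h>

    static int my_opp_event(struct notifier_block *nb, unsigned long event,
                            void *data)
    {
            switch (event) {
            case OPP_EVENT_ADD:
            case OPP_EVENT_ENABLE:
            case OPP_EVENT_DISABLE:
                    /* re-evaluate the frequency table for the device */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_opp_event };

    static int my_watch_opps(struct device *dev)
    {
            struct srcu_notifier_head *nh = opp_get_notifier(dev);

            if (IS_ERR(nh))
                    return PTR_ERR(nh);
            return srcu_notifier_chain_register(nh, &my_nb);
    }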
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 49c8727..a4c5624 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
/** lock for read/write safety */
-extern spinlock_t oprofilefs_lock;
+extern raw_spinlock_t oprofilefs_lock;
/**
* Add the contents of a circular buffer to the event buffer.
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 5edc901..b9df9ed 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -16,7 +16,7 @@
#ifdef CONFIG_SMP
struct percpu_counter {
- spinlock_t lock;
+ raw_spinlock_t lock;
s64 count;
#ifdef CONFIG_HOTPLUG_CPU
struct list_head list; /* All percpu_counters are on a list */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c816075..1e9ebe5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -220,7 +220,10 @@ struct perf_event_attr {
mmap_data : 1, /* non-exec mmap data */
sample_id_all : 1, /* sample_type all events */
- __reserved_1 : 45;
+ exclude_host : 1, /* don't count in host */
+ exclude_guest : 1, /* don't count in guest */
+
+ __reserved_1 : 43;
union {
__u32 wakeup_events; /* wakeup every n events */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 54fc413..79f337c 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -420,7 +420,7 @@ struct phy_driver {
/*
* Requests a Tx timestamp for 'skb'. The phy driver promises
- * to deliver it to the socket's error queue as soon as a
+ * to deliver it using skb_complete_tx_timestamp() as soon as a
* timestamp becomes available. One of the PTP_CLASS_ values
* is passed in 'type'.
*/
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
new file mode 100644
index 0000000..8886353
--- /dev/null
+++ b/include/linux/pinctrl/machine.h
@@ -0,0 +1,107 @@
+/*
+ * Machine interface for the pinctrl subsystem.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINMUX_MACHINE_H
+#define __LINUX_PINMUX_MACHINE_H
+
+/**
+ * struct pinmux_map - boards/machines shall provide this map for devices
+ * @name: the name of this specific map entry for the particular machine.
+ * This is the second parameter passed to pinmux_get() when you want
+ * to have several mappings to the same device
+ * @ctrl_dev: the pin control device to be used by this mapping, may be NULL
+ * if you provide .ctrl_dev_name instead (this is more common)
+ * @ctrl_dev_name: the name of the device controlling this specific mapping,
+ * the name must be the same as in your struct device*, may be NULL if
+ * you provide .ctrl_dev instead
+ * @function: a function in the driver to use for this mapping, the driver
+ * will lookup the function referenced by this ID on the specified
+ * pin control device
+ * @group: sometimes a function can map to different pin groups, so this
+ * selects a certain specific pin group to activate for the function, if
+ * left as NULL, the first applicable group will be used
+ * @dev: the device using this specific mapping, may be NULL if you provide
+ * .dev_name instead (this is more common)
+ * @dev_name: the name of the device using this specific mapping, the name
+ * must be the same as in your struct device*, may be NULL if you
+ * provide .dev instead
+ * @hog_on_boot: if this is set to true, the pin control subsystem will itself
+ * hog the mappings as the pinmux device drivers are attached, so this is
+ * typically used with system maps (mux mappings without an assigned
+ * device) that you want to get hogged and enabled by default as soon as
+ * a pinmux device supporting it is registered. These maps will not be
+ * disabled and put until the system shuts down.
+ */
+struct pinmux_map {
+ const char *name;
+ struct device *ctrl_dev;
+ const char *ctrl_dev_name;
+ const char *function;
+ const char *group;
+ struct device *dev;
+ const char *dev_name;
+ const bool hog_on_boot;
+};
+
+/*
+ * Convenience macro to set a simple map from a certain pin controller and a
+ * certain function to a named device
+ */
+#define PINMUX_MAP(a, b, c, d) \
+ { .name = a, .ctrl_dev_name = b, .function = c, .dev_name = d }
+
+/*
+ * Convenience macro to map a system function onto a certain pinctrl device.
+ * System functions are not assigned to a particular device.
+ */
+#define PINMUX_MAP_SYS(a, b, c) \
+ { .name = a, .ctrl_dev_name = b, .function = c }
+
+/*
+ * Convenience macro to map a function onto the primary pinctrl device;
+ * this is especially helpful on systems that have only one pin controller
+ * or that need to set up a lot of mappings on the primary controller.
+ */
+#define PINMUX_MAP_PRIMARY(a, b, c) \
+ { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
+ .dev_name = c }
+
+/*
+ * Convenience macro to map a system function onto the primary pinctrl device.
+ * System functions are not assigned to a particular device.
+ */
+#define PINMUX_MAP_PRIMARY_SYS(a, b) \
+ { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b }
+
+/*
+ * Convenience macro to map a system function onto the primary pinctrl device,
+ * to be hogged by the pinmux core until the system shuts down.
+ */
+#define PINMUX_MAP_PRIMARY_SYS_HOG(a, b) \
+ { .name = a, .ctrl_dev_name = "pinctrl.0", .function = b, \
+ .hog_on_boot = true }
+
+
+#ifdef CONFIG_PINMUX
+
+extern int pinmux_register_mappings(struct pinmux_map const *map,
+ unsigned num_maps);
+
+#else
+
+static inline int pinmux_register_mappings(struct pinmux_map const *map,
+ unsigned num_maps)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PINMUX */
+#endif
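A board file would normally build its map table with the convenience macros and register it once at init time; a sketch with made-up controller, function and device names:

    static const struct pinmux_map my_board_map[] = {
            /* route the "i2c0" function on pinctrl.0 to the i2c.0 device */
            PINMUX_MAP_PRIMARY("i2c0-map", "i2c0", "i2c.0"),
            /* a system setting hogged by the core as soon as it can be */
            PINMUX_MAP_PRIMARY_SYS_HOG("power-map", "power_ctrl"),
    };

    static int __init my_board_pinmux_init(void)
    {
            return pinmux_register_mappings(my_board_map,
                                            ARRAY_SIZE(my_board_map));
    }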
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
new file mode 100644
index 0000000..3605e94
--- /dev/null
+++ b/include/linux/pinctrl/pinctrl.h
@@ -0,0 +1,133 @@
+/*
+ * Interface the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCTRL_H
+#define __LINUX_PINCTRL_PINCTRL_H
+
+#ifdef CONFIG_PINCTRL
+
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+
+struct pinctrl_dev;
+struct pinmux_ops;
+struct gpio_chip;
+
+/**
+ * struct pinctrl_pin_desc - boards/machines provide information on their
+ * pins, pads or other muxable units in this struct
+ * @number: unique pin number from the global pin number space
+ * @name: a name for this pin
+ */
+struct pinctrl_pin_desc {
+ unsigned number;
+ const char *name;
+};
+
+/* Convenience macro to define a single named or anonymous pin descriptor */
+#define PINCTRL_PIN(a, b) { .number = a, .name = b }
+#define PINCTRL_PIN_ANON(a) { .number = a }
+
+/**
+ * struct pinctrl_gpio_range - each pin controller can provide subranges of
+ * the GPIO number space to be handled by the controller
+ * @node: list node for internal use
+ * @name: a name for the chip in this range
+ * @id: an ID number for the chip in this range
+ * @base: base offset of the GPIO range
+ * @npins: number of pins in the GPIO range, including the base number
+ * @gc: an optional pointer to a gpio_chip
+ */
+struct pinctrl_gpio_range {
+ struct list_head node;
+ const char *name;
+ unsigned int id;
+ unsigned int base;
+ unsigned int npins;
+ struct gpio_chip *gc;
+};
+
+/**
+ * struct pinctrl_ops - global pin control operations, to be implemented by
+ * pin controller drivers.
+ * @list_groups: list the number of selectable named groups available
+ * in this pinmux driver, the core will begin on 0 and call this
+ * repeatedly as long as it returns >= 0 to enumerate the groups
+ * @get_group_name: return the group name of the pin group
+ * @get_group_pins: return an array of pins corresponding to a certain
+ * group selector @pins, and the size of the array in @num_pins
+ * @pin_dbg_show: optional debugfs display hook that will provide per-device
+ * info for a certain pin in debugfs
+ */
+struct pinctrl_ops {
+ int (*list_groups) (struct pinctrl_dev *pctldev, unsigned selector);
+ const char *(*get_group_name) (struct pinctrl_dev *pctldev,
+ unsigned selector);
+ int (*get_group_pins) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins);
+ void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset);
+};
+
+/**
+ * struct pinctrl_desc - pin controller descriptor, register this to pin
+ * control subsystem
+ * @name: name for the pin controller
+ * @pins: an array of pin descriptors describing all the pins handled by
+ * this pin controller
+ * @npins: number of descriptors in the array, usually just ARRAY_SIZE()
+ * of the pins field above
+ * @maxpin: since pin spaces may be sparse, there can be "holes" in the
+ * pin range, this attribute gives the maximum pin number in the
+ * total range. This should not be lower than npins for example,
+ * but may be equal to npins if you have no holes in the pin range.
+ * @pctlops: pin control operation vtable, to support global concepts like
+ * grouping of pins, this is optional.
+ * @pmxops: pinmux operation vtable, if you support pinmuxing in your driver
+ * @owner: module providing the pin controller, used for refcounting
+ */
+struct pinctrl_desc {
+ const char *name;
+ struct pinctrl_pin_desc const *pins;
+ unsigned int npins;
+ unsigned int maxpin;
+ struct pinctrl_ops *pctlops;
+ struct pinmux_ops *pmxops;
+ struct module *owner;
+};
+
+/* External interface to pin controller */
+extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data);
+extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
+extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range);
+extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range);
+extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
+extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
+#else
+
+
+/* Sufficiently stupid default function when pinctrl is not in use */
+static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
+{
+ return pin >= 0;
+}
+
+#endif /* !CONFIG_PINCTRL */
+
+#endif /* __LINUX_PINCTRL_PINCTRL_H */
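Registering a pin controller amounts to describing the pins and wiring up the two ops tables. A bare-bones driver-side sketch; the pin names and the ops implementations are placeholders, and the failure check assumes the NULL-on-error return of this initial implementation:

    #include <linux/pinctrl/pinctrl.h>

    extern struct pinctrl_ops my_pctl_ops;  /* group listing callbacks, not shown */
    extern struct pinmux_ops my_pmx_ops;    /* mux callbacks, not shown */

    static const struct pinctrl_pin_desc my_pins[] = {
            PINCTRL_PIN(0, "GPIO0_A0"),
            PINCTRL_PIN(1, "GPIO0_A1"),
            PINCTRL_PIN(2, "GPIO0_A2"),
    };

    static struct pinctrl_desc my_desc = {
            .name = "my-pinctrl",
            .pins = my_pins,
            .npins = ARRAY_SIZE(my_pins),
            .maxpin = 2,
            .pctlops = &my_pctl_ops,
            .pmxops = &my_pmx_ops,
            .owner = THIS_MODULE,
    };

    static int my_probe(struct platform_device *pdev)
    {
            struct pinctrl_dev *pctl;

            pctl = pinctrl_register(&my_desc, &pdev->dev, NULL);
            if (!pctl)
                    return -EINVAL;
            platform_set_drvdata(pdev, pctl);
            return 0;
    }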
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
new file mode 100644
index 0000000..3c430e7
--- /dev/null
+++ b/include/linux/pinctrl/pinmux.h
@@ -0,0 +1,117 @@
+/*
+ * Interface the pinmux subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINMUX_H
+#define __LINUX_PINCTRL_PINMUX_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "pinctrl.h"
+
+/* This struct is private to the core and should be regarded as a cookie */
+struct pinmux;
+
+#ifdef CONFIG_PINMUX
+
+struct pinctrl_dev;
+
+/**
+ * struct pinmux_ops - pinmux operations, to be implemented by pin controller
+ * drivers that support pinmuxing
+ * @request: called by the core to see if a certain pin can be made
+ * available for muxing; the core uses this to acquire the pins
+ * before selecting any actual mux setting for a function. The driver
+ * is allowed to answer "no" by returning a negative error code
+ * @free: the reverse function of the request() callback, frees a pin after
+ * being requested
+ * @list_functions: list the number of selectable named functions available
+ * in this pinmux driver, the core will begin on 0 and call this
+ * repeatedly as long as it returns >= 0 to enumerate mux settings
+ * @get_function_name: return the function name of the muxing selector,
+ * called by the core to figure out which mux setting it shall map a
+ * certain device to
+ * @get_function_groups: return an array of group names (in turn
+ * referencing pins) connected to a certain function selector. The group
+ * name can be used with the generic @pinctrl_ops to retrieve the
+ * actual pins affected. The applicable groups will be returned in
+ * @groups and the number of groups in @num_groups
+ * @enable: enable a certain muxing function with a certain pin group. The
+ * driver does not need to figure out whether enabling this function
+ * conflicts with some other use of the pins in that group; such collisions
+ * are handled by the pinmux subsystem. The @func_selector selects a
+ * certain function whereas @group_selector selects a certain set of pins
+ * to be used. On simple controllers the latter argument may be ignored
+ * @disable: disable a certain muxing selector with a certain pin group
+ * @gpio_request_enable: requests and enables GPIO on a certain pin.
+ * Implement this only if you can mux every pin individually as GPIO. The
+ * affected GPIO range is passed along with an offset into that
+ * specific GPIO range - function selectors and pin groups are orthogonal
+ * to this, the core will however make sure the pins do not collide
+ */
+struct pinmux_ops {
+ int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*list_functions) (struct pinctrl_dev *pctldev, unsigned selector);
+ const char *(*get_function_name) (struct pinctrl_dev *pctldev,
+ unsigned selector);
+ int (*get_function_groups) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups);
+ int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector,
+ unsigned group_selector);
+ void (*disable) (struct pinctrl_dev *pctldev, unsigned func_selector,
+ unsigned group_selector);
+ int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset);
+};
+
+/* External interface to pinmux */
+extern int pinmux_request_gpio(unsigned gpio);
+extern void pinmux_free_gpio(unsigned gpio);
+extern struct pinmux * __must_check pinmux_get(struct device *dev, const char *name);
+extern void pinmux_put(struct pinmux *pmx);
+extern int pinmux_enable(struct pinmux *pmx);
+extern void pinmux_disable(struct pinmux *pmx);
+
+#else /* !CONFIG_PINMUX */
+
+static inline int pinmux_request_gpio(unsigned gpio)
+{
+ return 0;
+}
+
+static inline void pinmux_free_gpio(unsigned gpio)
+{
+}
+
+static inline struct pinmux * __must_check pinmux_get(struct device *dev, const char *name)
+{
+ return NULL;
+}
+
+static inline void pinmux_put(struct pinmux *pmx)
+{
+}
+
+static inline int pinmux_enable(struct pinmux *pmx)
+{
+ return 0;
+}
+
+static inline void pinmux_disable(struct pinmux *pmx)
+{
+}
+
+#endif /* CONFIG_PINMUX */
+
+#endif /* __LINUX_PINCTRL_PINMUX_H */
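From a consumer driver the flow is get, enable, use the pins, disable, put. A hedged sketch against the interface above; a NULL map name covers the common single-mapping case described in machine.h:

    #include <linux/pinctrl/pinmux.h>
    #include <linux/err.h>

    static struct pinmux *my_pmx;

    static int my_claim_pins(struct device *dev)
    {
            my_pmx = pinmux_get(dev, NULL);
            if (IS_ERR(my_pmx))
                    return PTR_ERR(my_pmx);
            return pinmux_enable(my_pmx);
    }

    static void my_release_pins(void)
    {
            pinmux_disable(my_pmx);
            pinmux_put(my_pmx);
    }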
diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h
new file mode 100644
index 0000000..ada4012
--- /dev/null
+++ b/include/linux/platform_data/dwc3-omap.h
@@ -0,0 +1,47 @@
+/**
+ * dwc3-omap.h - OMAP Specific Glue layer, header.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ * All rights reserved.
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+enum dwc3_omap_utmi_mode {
+ DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
+ DWC3_OMAP_UTMI_MODE_HW,
+ DWC3_OMAP_UTMI_MODE_SW,
+};
+
+struct dwc3_omap_data {
+ enum dwc3_omap_utmi_mode utmi_mode;
+};
diff --git a/include/linux/platform_data/exynos4_tmu.h b/include/linux/platform_data/exynos4_tmu.h
new file mode 100644
index 0000000..39e038c
--- /dev/null
+++ b/include/linux/platform_data/exynos4_tmu.h
@@ -0,0 +1,83 @@
+/*
+ * exynos4_tmu.h - Samsung EXYNOS4 TMU (Thermal Management Unit)
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_EXYNOS4_TMU_H
+#define _LINUX_EXYNOS4_TMU_H
+
+enum calibration_type {
+ TYPE_ONE_POINT_TRIMMING,
+ TYPE_TWO_POINT_TRIMMING,
+ TYPE_NONE,
+};
+
+/**
+ * struct exynos4_tmu_platform_data
+ * @threshold: basic temperature for generating interrupt
+ * 25 <= threshold <= 125 [unit: degree Celsius]
+ * @trigger_levels: array for each interrupt levels
+ * [unit: degree Celsius]
+ * 0: temperature for trigger_level0 interrupt
+ * condition for trigger_level0 interrupt:
+ * current temperature > threshold + trigger_levels[0]
+ * 1: temperature for trigger_level1 interrupt
+ * condition for trigger_level1 interrupt:
+ * current temperature > threshold + trigger_levels[1]
+ * 2: temperature for trigger_level2 interrupt
+ * condition for trigger_level2 interrupt:
+ * current temperature > threshold + trigger_levels[2]
+ * 3: temperature for trigger_level3 interrupt
+ * condition for trigger_level3 interrupt:
+ * current temperature > threshold + trigger_levels[3]
+ * @trigger_level0_en:
+ * 1 = enable trigger_level0 interrupt,
+ * 0 = disable trigger_level0 interrupt
+ * @trigger_level1_en:
+ * 1 = enable trigger_level1 interrupt,
+ * 0 = disable trigger_level1 interrupt
+ * @trigger_level2_en:
+ * 1 = enable trigger_level2 interrupt,
+ * 0 = disable trigger_level2 interrupt
+ * @trigger_level3_en:
+ * 1 = enable trigger_level3 interrupt,
+ * 0 = disable trigger_level3 interrupt
+ * @gain: gain of amplifier in the positive-TC generator block
+ * 0 <= gain <= 15
+ * @reference_voltage: reference voltage of amplifier
+ * in the positive-TC generator block
+ * 0 <= reference_voltage <= 31
+ * @cal_type: calibration type for temperature
+ *
+ * This structure is required to configure the exynos4_tmu driver.
+ */
+struct exynos4_tmu_platform_data {
+ u8 threshold;
+ u8 trigger_levels[4];
+ bool trigger_level0_en;
+ bool trigger_level1_en;
+ bool trigger_level2_en;
+ bool trigger_level3_en;
+
+ u8 gain;
+ u8 reference_voltage;
+
+ enum calibration_type cal_type;
+};
+#endif /* _LINUX_EXYNOS4_TMU_H */
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
new file mode 100644
index 0000000..e9d9149
--- /dev/null
+++ b/include/linux/platform_data/mv_usb.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_PLATFORM_USB_H
+#define __MV_PLATFORM_USB_H
+
+enum pxa_ehci_type {
+ EHCI_UNDEFINED = 0,
+ PXA_U2OEHCI, /* pxa 168, 9xx */
+ PXA_SPH, /* pxa 168, 9xx SPH */
+ MMP3_HSIC, /* mmp3 hsic */
+ MMP3_FSIC, /* mmp3 fsic */
+};
+
+enum {
+ MV_USB_MODE_OTG,
+ MV_USB_MODE_HOST,
+};
+
+enum {
+ VBUS_LOW = 0,
+ VBUS_HIGH = 1 << 0,
+};
+
+struct mv_usb_addon_irq {
+ unsigned int irq;
+ int (*poll)(void);
+};
+
+struct mv_usb_platform_data {
+ unsigned int clknum;
+ char **clkname;
+ struct mv_usb_addon_irq *id; /* Only valid for OTG. ID pin change */
+ struct mv_usb_addon_irq *vbus; /* valid for OTG/UDC. VBUS change */
+
+ /* only valid for HCD. OTG or Host only */
+ unsigned int mode;
+
+ int (*phy_init)(unsigned int regbase);
+ void (*phy_deinit)(unsigned int regbase);
+ int (*set_vbus)(unsigned int vbus);
+};
+
+#endif
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index abd2862..88734e8 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -36,7 +36,7 @@ struct ntc_thermistor_platform_data {
* read_uV()
*
* How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc
+ * described at Documentation/hwmon/ntc_thermistor
*
* pullup/down_ohm: 0 for infinite / not-connected
*/
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 27bb05a..651a066 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -49,10 +49,54 @@ extern struct resource *platform_get_resource_byname(struct platform_device *, u
extern int platform_get_irq_byname(struct platform_device *, const char *);
extern int platform_add_devices(struct platform_device **, int);
-extern struct platform_device *platform_device_register_resndata(
+struct platform_device_info {
+ struct device *parent;
+
+ const char *name;
+ int id;
+
+ const struct resource *res;
+ unsigned int num_res;
+
+ const void *data;
+ size_t size_data;
+ u64 dma_mask;
+};
+extern struct platform_device *platform_device_register_full(
+ struct platform_device_info *pdevinfo);
+
+/**
+ * platform_device_register_resndata - add a platform-level device with
+ * resources and platform-specific data
+ *
+ * @parent: parent device for the device we're adding
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_resndata(
struct device *parent, const char *name, int id,
const struct resource *res, unsigned int num,
- const void *data, size_t size);
+ const void *data, size_t size) {
+
+ struct platform_device_info pdevinfo = {
+ .parent = parent,
+ .name = name,
+ .id = id,
+ .res = res,
+ .num_res = num,
+ .data = data,
+ .size_data = size,
+ .dma_mask = 0,
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
/**
* platform_device_register_simple - add a platform-level device and its resources
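platform_device_register_resndata() is now a static inline wrapper that fills a struct platform_device_info and calls platform_device_register_full(); callers that also need a DMA mask can use the full variant directly. A sketch with made-up name, resource and mask, assuming the ERR_PTR-on-failure convention of the old resndata helper:

    static struct resource my_res[] = {
            {
                    .start = 0x10000000,
                    .end   = 0x10000fff,
                    .flags = IORESOURCE_MEM,
            },
    };

    static struct platform_device_info my_devinfo = {
            .name     = "my-device",
            .id       = 0,
            .res      = my_res,
            .num_res  = ARRAY_SIZE(my_res),
            .dma_mask = DMA_BIT_MASK(32),   /* not expressible via register_resndata() */
    };

    static int __init my_add_device(void)
    {
            struct platform_device *pdev = platform_device_register_full(&my_devinfo);

            return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
    }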
diff --git a/include/linux/pm.h b/include/linux/pm.h
index f7c84c9..f15acb6 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -326,6 +326,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
* requested by a driver.
*/
+#define PM_EVENT_INVALID (-1)
#define PM_EVENT_ON 0x0000
#define PM_EVENT_FREEZE 0x0001
#define PM_EVENT_SUSPEND 0x0002
@@ -346,6 +347,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
+#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
@@ -366,6 +368,8 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
#define PMSG_AUTO_RESUME ((struct pm_message) \
{ .event = PM_EVENT_AUTO_RESUME, })
+#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
+
/**
* Device run-time power management status.
*
@@ -421,6 +425,22 @@ enum rpm_request {
struct wakeup_source;
+struct pm_domain_data {
+ struct list_head list_node;
+ struct device *dev;
+};
+
+struct pm_subsys_data {
+ spinlock_t lock;
+ unsigned int refcount;
+#ifdef CONFIG_PM_CLK
+ struct list_head clock_list;
+#endif
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+ struct pm_domain_data *domain_data;
+#endif
+};
+
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
@@ -432,6 +452,7 @@ struct dev_pm_info {
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
+ bool wakeup_path:1;
#else
unsigned int should_wakeup:1;
#endif
@@ -462,10 +483,13 @@ struct dev_pm_info {
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
#endif
- void *subsys_data; /* Owned by the subsystem. */
+ struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
+ struct pm_qos_constraints *constraints;
};
extern void update_pm_runtime_accounting(struct device *dev);
+extern int dev_pm_get_subsys_data(struct device *dev);
+extern int dev_pm_put_subsys_data(struct device *dev);
/*
* Power domains provide callbacks that are executed during system suspend,
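As a minimal sketch of the new PMSG_IS_AUTO() helper, a legacy bus suspend callback could use it to tell an automatic (runtime) transition apart from a system-wide one; my_runtime_suspend() and my_system_suspend() are hypothetical helpers:

        static int my_bus_suspend(struct device *dev, pm_message_t msg)
        {
                if (PMSG_IS_AUTO(msg))
                        return my_runtime_suspend(dev);   /* runtime PM path */

                return my_system_suspend(dev);            /* system sleep path */
        }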
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
new file mode 100644
index 0000000..8348866
--- /dev/null
+++ b/include/linux/pm_clock.h
@@ -0,0 +1,71 @@
+/*
+ * pm_clock.h - Definitions and headers related to device clocks.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_CLOCK_H
+#define _LINUX_PM_CLOCK_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+struct pm_clk_notifier_block {
+ struct notifier_block nb;
+ struct dev_pm_domain *pm_domain;
+ char *con_ids[];
+};
+
+#ifdef CONFIG_PM_CLK
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+ return dev && dev->power.subsys_data
+ && list_empty(&dev->power.subsys_data->clock_list);
+}
+
+extern void pm_clk_init(struct device *dev);
+extern int pm_clk_create(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
+#else
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+ return true;
+}
+static inline void pm_clk_init(struct device *dev)
+{
+}
+static inline int pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
+static inline void pm_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_clk_add(struct device *dev, const char *con_id)
+{
+ return -EINVAL;
+}
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_clk_suspend NULL
+#define pm_clk_resume NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
+#endif
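A rough sketch of how a subsystem might use the relocated pm_clk_*() helpers at device setup time; the "fck" connection id is an assumption, not something this header mandates:

        static int my_bus_device_setup(struct device *dev)
        {
                int ret;

                ret = pm_clk_create(dev);       /* allocate the per-device clock list */
                if (ret)
                        return ret;

                ret = pm_clk_add(dev, "fck");   /* hypothetical connection id */
                if (ret)
                        pm_clk_destroy(dev);

                return ret;
        }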
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f9ec173..65633e5 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -13,6 +13,7 @@
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
+ GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
GPD_STATE_BUSY, /* Something is happening to the PM domain */
GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */
@@ -25,15 +26,14 @@ struct dev_power_governor {
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
- struct list_head sd_node; /* Node in the parent's subdomain list */
- struct generic_pm_domain *parent; /* Parent PM domain */
- struct list_head sd_list; /* List of dubdomains */
+ struct list_head master_links; /* Links with PM domain as a master */
+ struct list_head slave_links; /* Links with PM domain as a slave */
struct list_head dev_list; /* List of devices */
struct mutex lock;
struct dev_power_governor *gov;
struct work_struct power_off_work;
unsigned int in_progress; /* Number of devices being suspended now */
- unsigned int sd_count; /* Number of subdomains with power "on" */
+ atomic_t sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */
wait_queue_head_t status_wait_queue;
struct task_struct *poweroff_task; /* Powering off task */
@@ -42,6 +42,7 @@ struct generic_pm_domain {
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
bool suspend_power_off; /* Power status before system suspend */
+ bool dev_irq_safe; /* Device callbacks are IRQ-safe */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
int (*start_device)(struct device *dev);
@@ -54,12 +55,23 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
return container_of(pd, struct generic_pm_domain, domain);
}
-struct dev_list_entry {
- struct list_head node;
- struct device *dev;
+struct gpd_link {
+ struct generic_pm_domain *master;
+ struct list_head master_node;
+ struct generic_pm_domain *slave;
+ struct list_head slave_node;
+};
+
+struct generic_pm_domain_data {
+ struct pm_domain_data base;
bool need_restore;
};
+static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+{
+ return container_of(pdd, struct generic_pm_domain_data, base);
+}
+
#ifdef CONFIG_PM_GENERIC_DOMAINS
extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev);
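For context, hypothetical SoC setup code might attach a platform device to one of these generic PM domains as sketched below; my_soc_attach_device() and its error handling are illustrative only:

        static int my_soc_attach_device(struct generic_pm_domain *genpd,
                                        struct platform_device *pdev)
        {
                int ret;

                ret = pm_genpd_add_device(genpd, &pdev->dev);
                if (ret)
                        dev_warn(&pdev->dev, "failed to add to PM domain: %d\n", ret);

                return ret;
        }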
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
new file mode 100644
index 0000000..83b0ea3
--- /dev/null
+++ b/include/linux/pm_qos.h
@@ -0,0 +1,155 @@
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+/* interface for the pm_qos_power infrastructure of the linux kernel.
+ *
+ * Mark Gross <mgross@linux.intel.com>
+ */
+#include <linux/plist.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+
+#define PM_QOS_RESERVED 0
+#define PM_QOS_CPU_DMA_LATENCY 1
+#define PM_QOS_NETWORK_LATENCY 2
+#define PM_QOS_NETWORK_THROUGHPUT 3
+
+#define PM_QOS_NUM_CLASSES 4
+#define PM_QOS_DEFAULT_VALUE -1
+
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+#define PM_QOS_DEV_LAT_DEFAULT_VALUE 0
+
+struct pm_qos_request {
+ struct plist_node node;
+ int pm_qos_class;
+};
+
+struct dev_pm_qos_request {
+ struct plist_node node;
+ struct device *dev;
+};
+
+enum pm_qos_type {
+ PM_QOS_UNITIALIZED,
+ PM_QOS_MAX, /* return the largest value */
+ PM_QOS_MIN /* return the smallest value */
+};
+
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types linux supports for 32 bit quantities
+ */
+struct pm_qos_constraints {
+ struct plist_head list;
+ s32 target_value; /* Do not change to 64 bit */
+ s32 default_value;
+ enum pm_qos_type type;
+ struct blocking_notifier_head *notifiers;
+};
+
+/* Action requested to pm_qos_update_target */
+enum pm_qos_req_action {
+ PM_QOS_ADD_REQ, /* Add a new request */
+ PM_QOS_UPDATE_REQ, /* Update an existing request */
+ PM_QOS_REMOVE_REQ /* Remove an existing request */
+};
+
+static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
+{
+ return req->dev != 0;
+}
+
+#ifdef CONFIG_PM
+int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+ enum pm_qos_req_action action, int value);
+void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+ s32 value);
+void pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value);
+void pm_qos_remove_request(struct pm_qos_request *req);
+
+int pm_qos_request(int pm_qos_class);
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_request_active(struct pm_qos_request *req);
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+s32 dev_pm_qos_read_value(struct device *dev);
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ s32 value);
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
+int dev_pm_qos_add_notifier(struct device *dev,
+ struct notifier_block *notifier);
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier);
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
+void dev_pm_qos_constraints_init(struct device *dev);
+void dev_pm_qos_constraints_destroy(struct device *dev);
+#else
+static inline int pm_qos_update_target(struct pm_qos_constraints *c,
+ struct plist_node *node,
+ enum pm_qos_req_action action,
+ int value)
+ { return 0; }
+static inline void pm_qos_add_request(struct pm_qos_request *req,
+ int pm_qos_class, s32 value)
+ { return; }
+static inline void pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+ { return; }
+static inline void pm_qos_remove_request(struct pm_qos_request *req)
+ { return; }
+
+static inline int pm_qos_request(int pm_qos_class)
+ { return 0; }
+static inline int pm_qos_add_notifier(int pm_qos_class,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int pm_qos_remove_notifier(int pm_qos_class,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int pm_qos_request_active(struct pm_qos_request *req)
+ { return 0; }
+static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+ { return 0; }
+
+static inline s32 dev_pm_qos_read_value(struct device *dev)
+ { return 0; }
+static inline int dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ s32 value)
+ { return 0; }
+static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+ { return 0; }
+static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+ { return 0; }
+static inline int dev_pm_qos_add_notifier(struct device *dev,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_add_global_notifier(
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_remove_global_notifier(
+ struct notifier_block *notifier)
+ { return 0; }
+static inline void dev_pm_qos_constraints_init(struct device *dev)
+{
+ dev->power.power_state = PMSG_ON;
+}
+static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ dev->power.power_state = PMSG_INVALID;
+}
+#endif
+
+#endif
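A short sketch of the classic request flow declared above; the 20 us figure and the streaming start/stop hooks are hypothetical:

        static struct pm_qos_request my_dma_req;        /* hypothetical driver request */

        static void my_driver_start_streaming(void)
        {
                /* keep CPU DMA latency below 20 us while streaming */
                pm_qos_add_request(&my_dma_req, PM_QOS_CPU_DMA_LATENCY, 20);
        }

        static void my_driver_stop_streaming(void)
        {
                pm_qos_remove_request(&my_dma_req);
        }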
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
deleted file mode 100644
index a7d87f9..0000000
--- a/include/linux/pm_qos_params.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _LINUX_PM_QOS_PARAMS_H
-#define _LINUX_PM_QOS_PARAMS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
- *
- * Mark Gross <mgross@linux.intel.com>
- */
-#include <linux/plist.h>
-#include <linux/notifier.h>
-#include <linux/miscdevice.h>
-
-#define PM_QOS_RESERVED 0
-#define PM_QOS_CPU_DMA_LATENCY 1
-#define PM_QOS_NETWORK_LATENCY 2
-#define PM_QOS_NETWORK_THROUGHPUT 3
-
-#define PM_QOS_NUM_CLASSES 4
-#define PM_QOS_DEFAULT_VALUE -1
-
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
-
-struct pm_qos_request_list {
- struct plist_node list;
- int pm_qos_class;
-};
-
-void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
- s32 new_value);
-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request_list *req);
-
-#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index daac05d..70b2840 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -251,46 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
__pm_runtime_use_autosuspend(dev, false);
}
-struct pm_clk_notifier_block {
- struct notifier_block nb;
- struct dev_pm_domain *pm_domain;
- char *con_ids[];
-};
-
-#ifdef CONFIG_PM_CLK
-extern int pm_clk_init(struct device *dev);
-extern void pm_clk_destroy(struct device *dev);
-extern int pm_clk_add(struct device *dev, const char *con_id);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
-extern int pm_clk_suspend(struct device *dev);
-extern int pm_clk_resume(struct device *dev);
-#else
-static inline int pm_clk_init(struct device *dev)
-{
- return -EINVAL;
-}
-static inline void pm_clk_destroy(struct device *dev)
-{
-}
-static inline int pm_clk_add(struct device *dev, const char *con_id)
-{
- return -EINVAL;
-}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
-#define pm_clk_suspend NULL
-#define pm_clk_resume NULL
-#endif
-
-#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
- struct pm_clk_notifier_block *clknb);
-#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
- struct pm_clk_notifier_block *clknb)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 959c141..042058f 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -81,7 +81,10 @@ struct k_itimer {
unsigned long incr;
unsigned long expires;
} mmtimer;
- struct alarm alarmtimer;
+ struct {
+ struct alarm alarmtimer;
+ ktime_t interval;
+ } alarm;
struct rcu_head rcu;
} it;
};
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index cf793bb..ef35bb7 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
*/
int shift;
unsigned long period;
- spinlock_t lock; /* protect the snapshot state */
+ raw_spinlock_t lock; /* protect the snapshot state */
};
int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
*/
unsigned long period;
int shift;
- spinlock_t lock; /* protect the snapshot state */
+ raw_spinlock_t lock; /* protect the snapshot state */
};
#define INIT_PROP_LOCAL_SINGLE(name) \
-{ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
}
int prop_local_init_single(struct prop_local_single *pl);
diff --git a/include/linux/random.h b/include/linux/random.h
index d13059f..8f74538 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -91,6 +91,19 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
state->s3 = __seed(i, 15);
}
+#ifdef CONFIG_ARCH_RANDOM
+# include <asm/archrandom.h>
+#else
+static inline int arch_get_random_long(unsigned long *v)
+{
+ return 0;
+}
+static inline int arch_get_random_int(unsigned int *v)
+{
+ return 0;
+}
+#endif
+
#endif /* __KERNEL___ */
#endif /* _LINUX_RANDOM_H */
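A minimal sketch of the intended use of arch_get_random_long(): try the hardware source first and fall back to the software pool when it returns 0; my_get_seed() is a hypothetical helper:

        static unsigned long my_get_seed(void)
        {
                unsigned long seed;

                if (!arch_get_random_long(&seed))               /* 0 means no HW RNG */
                        get_random_bytes(&seed, sizeof(seed));  /* software fallback */

                return seed;
        }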
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 2f00715..e11ccb4 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -8,7 +8,7 @@
#define DEFAULT_RATELIMIT_BURST 10
struct ratelimit_state {
- spinlock_t lock; /* protect the state */
+ raw_spinlock_t lock; /* protect the state */
int interval;
int burst;
@@ -20,7 +20,7 @@ struct ratelimit_state {
#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
\
struct ratelimit_state name = { \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.interval = interval_init, \
.burst = burst_init, \
}
@@ -28,7 +28,7 @@ struct ratelimit_state {
static inline void ratelimit_state_init(struct ratelimit_state *rs,
int interval, int burst)
{
- spin_lock_init(&rs->lock);
+ raw_spin_lock_init(&rs->lock);
rs->interval = interval;
rs->burst = burst;
rs->printed = 0;
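For reference, typical usage of the ratelimit state is unchanged by the raw_spinlock_t conversion; the driver name and message below are hypothetical:

        static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);      /* 10 messages per 5 s */

        static void my_report_error(void)
        {
                if (__ratelimit(&my_rs))
                        pr_warn("mydrv: unexpected condition\n");
        }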
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 8f4f881..2cf4226 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -33,6 +33,7 @@
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
+#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
@@ -64,32 +65,74 @@ static inline void rcutorture_record_progress(unsigned long vernum)
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+/* Exported common interfaces */
+
+#ifdef CONFIG_PREEMPT_RCU
+
/**
- * struct rcu_head - callback structure for use with RCU
- * @next: next update requests in a list
- * @func: actual update function to call after the grace period.
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
*/
-struct rcu_head {
- struct rcu_head *next;
- void (*func)(struct rcu_head *head);
-};
+extern void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
-/* Exported common interfaces */
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ * OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ * These may be nested.
+ */
+extern void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+
+/**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_sched() assumes
+ * that the read-side critical sections end on enabling of preemption
+ * or on voluntary preemption.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(),
+ * OR
+ * anything that disables preemption.
+ * These may be nested.
+ */
extern void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));
-extern void synchronize_sched(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-static inline void __rcu_read_lock_bh(void)
-{
- local_bh_disable();
-}
-static inline void __rcu_read_unlock_bh(void)
-{
- local_bh_enable();
-}
+extern void synchronize_sched(void);
#ifdef CONFIG_PREEMPT_RCU
@@ -152,6 +195,15 @@ static inline void rcu_exit_nohz(void)
#endif /* #else #ifdef CONFIG_NO_HZ */
+/*
+ * Infrastructure to implement the synchronize_() primitives in
+ * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
+ */
+
+typedef void call_rcu_func_t(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+void wait_rcu_gp(call_rcu_func_t crf);
+
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
@@ -297,19 +349,31 @@ extern int rcu_my_thread_group_empty(void);
/**
* rcu_lockdep_assert - emit lockdep splat if specified condition not met
* @c: condition to check
+ * @s: informative message
*/
-#define rcu_lockdep_assert(c) \
+#define rcu_lockdep_assert(c, s) \
do { \
static bool __warned; \
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
__warned = true; \
- lockdep_rcu_dereference(__FILE__, __LINE__); \
+ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
} \
} while (0)
+#define rcu_sleep_check() \
+ do { \
+ rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh" \
+ " read-side critical section"); \
+ rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched"\
+ " read-side critical section"); \
+ } while (0)
+
#else /* #ifdef CONFIG_PROVE_RCU */
-#define rcu_lockdep_assert(c) do { } while (0)
+#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_sleep_check() do { } while (0)
#endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -338,14 +402,16 @@ extern int rcu_my_thread_group_empty(void);
#define __rcu_dereference_check(p, c, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \
- rcu_lockdep_assert(c); \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
+ " usage"); \
rcu_dereference_sparse(p, space); \
smp_read_barrier_depends(); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
- rcu_lockdep_assert(c); \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
+ " usage"); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(p)); \
})
@@ -359,15 +425,15 @@ extern int rcu_my_thread_group_empty(void);
#define __rcu_dereference_index_check(p, c) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
- rcu_lockdep_assert(c); \
+ rcu_lockdep_assert(c, \
+ "suspicious rcu_dereference_index_check()" \
+ " usage"); \
smp_read_barrier_depends(); \
(_________p1); \
})
#define __rcu_assign_pointer(p, v, space) \
({ \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- smp_wmb(); \
+ smp_wmb(); \
(p) = (typeof(*v) __force space *)(v); \
})
@@ -500,26 +566,6 @@ extern int rcu_my_thread_group_empty(void);
#define rcu_dereference_protected(p, c) \
__rcu_dereference_protected((p), (c), __rcu)
-/**
- * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * This is the RCU-bh counterpart to rcu_dereference_protected().
- */
-#define rcu_dereference_bh_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
-
-/**
- * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * This is the RCU-sched counterpart to rcu_dereference_protected().
- */
-#define rcu_dereference_sched_protected(p, c) \
- __rcu_dereference_protected((p), (c), __rcu)
-
/**
* rcu_dereference() - fetch RCU-protected pointer for dereferencing
@@ -630,7 +676,7 @@ static inline void rcu_read_unlock(void)
*/
static inline void rcu_read_lock_bh(void)
{
- __rcu_read_lock_bh();
+ local_bh_disable();
__acquire(RCU_BH);
rcu_read_acquire_bh();
}
@@ -644,7 +690,7 @@ static inline void rcu_read_unlock_bh(void)
{
rcu_read_release_bh();
__release(RCU_BH);
- __rcu_read_unlock_bh();
+ local_bh_enable();
}
/**
@@ -698,11 +744,18 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* any prior initialization. Returns the value assigned.
*
* Inserts memory barriers on architectures that require them
- * (pretty much all of them other than x86), and also prevents
- * the compiler from reordering the code that initializes the
- * structure after the pointer assignment. More importantly, this
- * call documents which pointers will be dereferenced by RCU read-side
- * code.
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
*/
#define rcu_assign_pointer(p, v) \
__rcu_assign_pointer((p), (v), __rcu)
@@ -710,105 +763,38 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
*
- * Initialize an RCU-protected pointer in such a way to avoid RCU-lockdep
- * splats.
+ * Initialize an RCU-protected pointer in special cases where readers
+ * do not need ordering constraints on the CPU or the compiler. These
+ * special cases are:
+ *
+ * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 2. The caller has taken whatever steps are required to prevent
+ * RCU readers from concurrently accessing this pointer -or-
+ * 3. The referenced data structure has already been exposed to
+ * readers either at compile time or via rcu_assign_pointer() -and-
+ * a. You have not made -any- reader-visible changes to
+ * this structure since then -or-
+ * b. It is OK for readers accessing this structure from its
+ * new location to see the old state of the structure. (For
+ * example, the changes were to statistical counters or to
+ * other state where exact synchronization is not required.)
+ *
+ * Failure to follow these rules governing use of RCU_INIT_POINTER() will
+ * result in impossible-to-diagnose memory corruption. As in the structures
+ * will look OK in crash dumps, but any concurrent RCU readers might
+ * see pre-initialized values of the referenced data structure. So
+ * please be very careful how you use RCU_INIT_POINTER()!!!
+ *
+ * If you are creating an RCU-protected linked structure that is accessed
+ * by a single external-to-structure RCU-protected pointer, then you may
+ * use RCU_INIT_POINTER() to initialize the internal RCU-protected
+ * pointers, but you must use rcu_assign_pointer() to initialize the
+ * external-to-structure pointer -after- you have completely initialized
+ * the reader-accessible portions of the linked structure.
*/
#define RCU_INIT_POINTER(p, v) \
p = (typeof(*v) __force __rcu *)(v)
-/* Infrastructure to implement the synchronize_() primitives. */
-
-struct rcu_synchronize {
- struct rcu_head head;
- struct completion completion;
-};
-
-extern void wakeme_after_rcu(struct rcu_head *head);
-
-#ifdef CONFIG_PREEMPT_RCU
-
-/**
- * call_rcu() - Queue an RCU callback for invocation after a grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all pre-existing RCU read-side
- * critical sections have completed. However, the callback function
- * might well execute concurrently with RCU read-side critical sections
- * that started after call_rcu() was invoked. RCU read-side critical
- * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
- * and may be nested.
- */
-extern void call_rcu(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-
-/* In classic RCU, call_rcu() is just call_rcu_sched(). */
-#define call_rcu call_rcu_sched
-
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-/**
- * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual callback function to be invoked after the grace period
- *
- * The callback function will be invoked some time after a full grace
- * period elapses, in other words after all currently executing RCU
- * read-side critical sections have completed. call_rcu_bh() assumes
- * that the read-side critical sections end on completion of a softirq
- * handler. This means that read-side critical sections in process
- * context must not be interrupted by softirqs. This interface is to be
- * used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- * OR
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- * These may be nested.
- */
-extern void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
-
-/*
- * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
- * by call_rcu() and rcu callback execution, and are therefore not part of the
- * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
- */
-
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-# define STATE_RCU_HEAD_READY 0
-# define STATE_RCU_HEAD_QUEUED 1
-
-extern struct debug_obj_descr rcuhead_debug_descr;
-
-static inline void debug_rcu_head_queue(struct rcu_head *head)
-{
- WARN_ON_ONCE((unsigned long)head & 0x3);
- debug_object_activate(head, &rcuhead_debug_descr);
- debug_object_active_state(head, &rcuhead_debug_descr,
- STATE_RCU_HEAD_READY,
- STATE_RCU_HEAD_QUEUED);
-}
-
-static inline void debug_rcu_head_unqueue(struct rcu_head *head)
-{
- debug_object_active_state(head, &rcuhead_debug_descr,
- STATE_RCU_HEAD_QUEUED,
- STATE_RCU_HEAD_READY);
- debug_object_deactivate(head, &rcuhead_debug_descr);
-}
-#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void debug_rcu_head_queue(struct rcu_head *head)
-{
-}
-
-static inline void debug_rcu_head_unqueue(struct rcu_head *head)
-{
-}
-#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-
static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
{
return offset < 4096;
@@ -827,18 +813,6 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
call_rcu(head, (rcu_callback)offset);
}
-extern void kfree(const void *);
-
-static inline void __rcu_reclaim(struct rcu_head *head)
-{
- unsigned long offset = (unsigned long)head->func;
-
- if (__is_kfree_rcu_offset(offset))
- kfree((void *)head - offset);
- else
- head->func(head);
-}
-
/**
* kfree_rcu() - kfree an object after a grace period.
* @ptr: pointer to kfree
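As a quick sketch of the update-side pattern these comments describe, assuming the usual rculist helpers and a hypothetical struct my_item with an embedded rcu_head:

        struct my_item {
                struct list_head list;
                struct rcu_head rcu;
                int value;
        };

        static void my_remove_item(struct my_item *item)
        {
                list_del_rcu(&item->list);      /* unlink under the writer's lock */
                kfree_rcu(item, rcu);           /* free after a grace period */
        }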
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 52b3e02..00b7a5e 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,9 +27,23 @@
#include <linux/cache.h>
+#ifdef CONFIG_RCU_BOOST
static inline void rcu_init(void)
{
}
+#else /* #ifdef CONFIG_RCU_BOOST */
+void rcu_init(void);
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+static inline void rcu_barrier_bh(void)
+{
+ wait_rcu_gp(call_rcu_bh);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+ wait_rcu_gp(call_rcu_sched);
+}
#ifdef CONFIG_TINY_RCU
@@ -45,9 +59,13 @@ static inline void rcu_barrier(void)
#else /* #ifdef CONFIG_TINY_RCU */
-void rcu_barrier(void);
void synchronize_rcu_expedited(void);
+static inline void rcu_barrier(void)
+{
+ wait_rcu_gp(call_rcu);
+}
+
#endif /* #else #ifdef CONFIG_TINY_RCU */
static inline void synchronize_rcu_bh(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e65d066..6745846 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -67,6 +67,8 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 60a65cd..3daac2d 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -20,9 +20,77 @@
struct i2c_client;
struct spi_device;
+/* An enum of all the supported cache types */
+enum regcache_type {
+ REGCACHE_NONE,
+ REGCACHE_INDEXED,
+ REGCACHE_RBTREE,
+ REGCACHE_LZO
+};
+
+/**
+ * Default value for a register. We use an array of structs rather
+ * than a simple array as many modern devices have very sparse
+ * register maps.
+ *
+ * @reg: Register address.
+ * @def: Register default value.
+ */
+struct reg_default {
+ unsigned int reg;
+ unsigned int def;
+};
+
+/**
+ * Configuration for the register map of a device.
+ *
+ * @reg_bits: Number of bits in a register address, mandatory.
+ * @val_bits: Number of bits in a register value, mandatory.
+ *
+ * @writeable_reg: Optional callback returning true if the register
+ * can be written to.
+ * @readable_reg: Optional callback returning true if the register
+ * can be read from.
+ * @volatile_reg: Optional callback returning true if the register
+ * value can't be cached.
+ * @precious_reg: Optional callback returning true if the register
+ * should not be read outside of a call from the driver
+ * (eg, a clear on read interrupt status register).
+ *
+ * @max_register: Optional, specifies the maximum valid register index.
+ * @reg_defaults: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults: Number of elements in reg_defaults.
+ *
+ * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * a read.
+ * @write_flag_mask: Mask to be set in the top byte of the register when doing
+ * a write. If both read_flag_mask and write_flag_mask are
+ * empty the regmap_bus default masks are used.
+ *
+ * @cache_type: The actual cache type.
+ * @reg_defaults_raw: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ */
struct regmap_config {
int reg_bits;
int val_bits;
+
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+
+ unsigned int max_register;
+ struct reg_default *reg_defaults;
+ unsigned int num_reg_defaults;
+ enum regcache_type cache_type;
+ const void *reg_defaults_raw;
+ unsigned int num_reg_defaults_raw;
+
+ u8 read_flag_mask;
+ u8 write_flag_mask;
};
typedef int (*regmap_hw_write)(struct device *dev, const void *data,
@@ -37,25 +105,18 @@ typedef int (*regmap_hw_read)(struct device *dev,
/**
* Description of a hardware bus for the register map infrastructure.
*
- * @list: Internal use.
- * @type: Bus type, used to identify bus to be used for a device.
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
* @read: Read operation. Data is returned in the buffer used to transmit
* data.
- * @owner: Module with the bus implementation, used to pin the implementation
- * in memory.
* @read_flag_mask: Mask to be set in the top byte of the register when doing
* a read.
*/
struct regmap_bus {
- struct list_head list;
- struct bus_type *type;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_read read;
- struct module *owner;
u8 read_flag_mask;
};
@@ -79,4 +140,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val);
+int regcache_sync(struct regmap *map);
+void regcache_cache_only(struct regmap *map, bool enable);
+void regcache_cache_bypass(struct regmap *map, bool enable);
+
#endif
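A sketch of a register map description using the new fields; the register addresses, defaults and the volatile register are hypothetical, and a bus-specific constructor (for example regmap_init_i2c(), assumed here) would consume the config at probe time:

        static struct reg_default my_chip_defaults[] = {
                { .reg = 0x00, .def = 0x0000 },
                { .reg = 0x01, .def = 0x00ff },
        };

        static bool my_chip_volatile(struct device *dev, unsigned int reg)
        {
                return reg == 0x02;             /* hypothetical status register */
        }

        static struct regmap_config my_chip_regmap = {
                .reg_bits         = 8,
                .val_bits         = 16,
                .volatile_reg     = my_chip_volatile,
                .max_register     = 0x10,
                .reg_defaults     = my_chip_defaults,
                .num_reg_defaults = ARRAY_SIZE(my_chip_defaults),
                .cache_type       = REGCACHE_RBTREE,
        };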
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b891de9..67be037 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -154,6 +154,8 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 3470124..d5b13bc 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -22,7 +22,7 @@
*/
struct rw_semaphore {
__s32 activity;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 6a67414..63d4065 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -25,7 +25,7 @@ struct rw_semaphore;
/* All arch specific implementations share the same struct */
struct rw_semaphore {
long count;
- spinlock_t wait_lock;
+ raw_spinlock_t wait_lock;
struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
@@ -56,9 +56,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
-#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
- LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name) \
+ { RWSEM_UNLOCKED_VALUE, \
+ __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 41d0237..e8acce7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -90,6 +90,7 @@ struct sched_param {
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
+#include <linux/llist.h>
#include <asm/processor.h>
@@ -270,7 +271,6 @@ extern void init_idle_bootup_task(struct task_struct *idle);
extern int runqueue_is_locked(int cpu);
-extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern int get_nohz_timer_target(void);
@@ -510,7 +510,7 @@ struct task_cputime {
struct thread_group_cputimer {
struct task_cputime cputime;
int running;
- spinlock_t lock;
+ raw_spinlock_t lock;
};
#include <linux/rwsem.h>
@@ -1225,7 +1225,7 @@ struct task_struct {
unsigned int ptrace;
#ifdef CONFIG_SMP
- struct task_struct *wake_entry;
+ struct llist_node wake_entry;
int on_cpu;
#endif
int on_rq;
@@ -1260,9 +1260,6 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
-#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
- int rcu_boosted;
-#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -2039,6 +2036,10 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
#ifdef CONFIG_RT_MUTEXES
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -2165,7 +2166,8 @@ extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
-extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
+extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
+ const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
@@ -2565,7 +2567,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
- spin_lock_init(&sig->cputimer.lock);
+ raw_spin_lock_init(&sig->cputimer.lock);
}
/*
diff --git a/include/linux/security.h b/include/linux/security.h
index ebd2a53..19d8e04 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -36,6 +36,7 @@
#include <linux/key.h>
#include <linux/xfrm.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <net/flow.h>
/* Maximum number of letters for an LSM name string */
@@ -147,6 +148,10 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
+/* security_inode_init_security callback function to write xattrs */
+typedef int (*initxattrs) (struct inode *inode,
+ const struct xattr *xattr_array, void *fs_data);
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -1367,7 +1372,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* @inode_getsecctx:
* Returns a string containing all relavent security context information
*
- * @inode we wish to set the security context of.
+ * @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
* This is the main security structure.
@@ -1655,6 +1660,8 @@ struct security_operations {
extern int security_init(void);
extern int security_module_enable(struct security_operations *ops);
extern int register_security(struct security_operations *ops);
+extern void __init security_fixup_ops(struct security_operations *ops);
+
/* Security operations */
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
@@ -1704,8 +1711,11 @@ int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
int security_inode_alloc(struct inode *inode);
void security_inode_free(struct inode *inode);
int security_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
- void **value, size_t *len);
+ const struct qstr *qstr,
+ initxattrs initxattrs, void *fs_data);
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, char **name,
+ void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
@@ -2034,11 +2044,19 @@ static inline void security_inode_free(struct inode *inode)
static inline int security_inode_init_security(struct inode *inode,
struct inode *dir,
const struct qstr *qstr,
- char **name,
- void **value,
- size_t *len)
+ initxattrs initxattrs,
+ void *fs_data)
{
- return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline int security_old_inode_init_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr,
+ char **name, void **value,
+ size_t *len)
+{
+ return 0;
}
static inline int security_inode_create(struct inode *dir,
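For orientation, a filesystem adopting the new callback-based security_inode_init_security() would supply an initxattrs handler along these lines; my_fs_setxattr() is a hypothetical per-filesystem helper, and the loop relies on the xattr array being terminated by a NULL name:

        static int my_fs_initxattrs(struct inode *inode,
                                    const struct xattr *xattr_array, void *fs_data)
        {
                const struct xattr *xattr;
                int err = 0;

                for (xattr = xattr_array; xattr->name != NULL; xattr++) {
                        err = my_fs_setxattr(inode, xattr->name,
                                             xattr->value, xattr->value_len, fs_data);
                        if (err)
                                break;
                }
                return err;
        }

The filesystem then passes this callback (plus any fs_data cookie) to security_inode_init_security() when creating the inode, instead of receiving a single name/value pair as before.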
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa049..dc368b8 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -14,14 +14,14 @@
/* Please don't access any members of this structure directly */
struct semaphore {
- spinlock_t lock;
+ raw_spinlock_t lock;
unsigned int count;
struct list_head wait_list;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
.count = n, \
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
diff --git a/include/linux/serial.h b/include/linux/serial.h
index ef91406..97ff8e2 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -211,6 +211,7 @@ struct serial_rs485 {
#define SER_RS485_RTS_ON_SEND (1 << 1)
#define SER_RS485_RTS_AFTER_SEND (1 << 2)
#define SER_RS485_RTS_BEFORE_SEND (1 << 3)
+#define SER_RS485_RX_DURING_TX (1 << 4)
__u32 delay_rts_before_send; /* Milliseconds */
__u32 delay_rts_after_send; /* Milliseconds */
__u32 padding[5]; /* Memory is cheap, new structs
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 97f5b45..1f05bbe 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -35,6 +35,7 @@ struct plat_serial8250_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned old);
};
@@ -80,6 +81,7 @@ extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
extern void serial8250_set_isa_configurator(void (*v)
(int port, struct uart_port *up,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a5c3114..eadf33d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -46,7 +46,8 @@
#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
#define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */
#define PORT_TEGRA 20 /* NVIDIA Tegra internal UART */
-#define PORT_MAX_8250 20 /* max port ID */
+#define PORT_XR17D15X 21 /* Exar XR17D15x UART */
+#define PORT_MAX_8250 21 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -300,6 +301,7 @@ struct uart_port {
void (*set_termios)(struct uart_port *,
struct ktermios *new,
struct ktermios *old);
+ int (*handle_irq)(struct uart_port *);
void (*pm)(struct uart_port *, unsigned int state,
unsigned int old);
unsigned int irq; /* irq number */
@@ -317,9 +319,7 @@ struct uart_port {
#define UPIO_MEM32 (3)
#define UPIO_AU (4) /* Au1x00 type IO */
#define UPIO_TSI (5) /* Tsi108/109 type IO */
-#define UPIO_DWAPB (6) /* DesignWare APB UART */
-#define UPIO_RM9000 (7) /* RM9000 type IO */
-#define UPIO_DWAPB32 (8) /* DesignWare APB UART (32 bit accesses) */
+#define UPIO_RM9000 (6) /* RM9000 type IO */
unsigned int read_status_mask; /* driver specific */
unsigned int ignore_status_mask; /* driver specific */
@@ -350,6 +350,7 @@ struct uart_port {
#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16))
#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
+#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
/* The exact UART type is known and should not be probed. */
#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
@@ -384,7 +385,6 @@ struct uart_state {
int pm_state;
struct circ_buf xmit;
- struct tasklet_struct tlet;
struct uart_port *uart_port;
};
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index c75bda3..8ce70d7 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -152,6 +152,7 @@
* LCR=0xBF (or DLAB=1 for 16C660)
*/
#define UART_EFR 2 /* I/O: Extended Features Register */
+#define UART_XR_EFR 9 /* I/O: Extended Features Register (XR17D15x) */
#define UART_EFR_CTS 0x80 /* CTS flow control */
#define UART_EFR_RTS 0x40 /* RTS flow control */
#define UART_EFR_SCD 0x20 /* Special character detect */
diff --git a/include/linux/serio.h b/include/linux/serio.h
index e26f478..be7dfb0 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -199,5 +199,6 @@ static inline void serio_continue_rx(struct serio *serio)
#define SERIO_DYNAPRO 0x3a
#define SERIO_HAMPSHIRE 0x3b
#define SERIO_PS2MULT 0x3c
+#define SERIO_TSC40 0x3d
#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 64f8695..6a6b352 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -140,7 +140,9 @@ struct sk_buff;
typedef struct skb_frag_struct skb_frag_t;
struct skb_frag_struct {
- struct page *page;
+ struct {
+ struct page *p;
+ } page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
__u32 page_offset;
__u32 size;
@@ -150,6 +152,26 @@ struct skb_frag_struct {
#endif
};
+static inline unsigned int skb_frag_size(const skb_frag_t *frag)
+{
+ return frag->size;
+}
+
+static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
+{
+ frag->size = size;
+}
+
+static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
+{
+ frag->size += delta;
+}
+
+static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+ frag->size -= delta;
+}
+
#define HAVE_HW_TIME_STAMP
/**
@@ -530,6 +552,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}
+extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
@@ -832,9 +855,9 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
- struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ struct sk_buff *list = ((const struct sk_buff *)list_)->next;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
@@ -853,9 +876,9 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
* The reference count is not incremented and the reference is therefore
* volatile. Use with caution.
*/
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+ struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
if (list == (struct sk_buff *)list_)
list = NULL;
return list;
@@ -1132,7 +1155,7 @@ static inline int skb_pagelen(const struct sk_buff *skb)
int i, len = 0;
for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
- len += skb_shinfo(skb)->frags[i].size;
+ len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
return len + skb_headlen(skb);
}
@@ -1154,9 +1177,9 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
{
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
+ frag->page.p = page;
frag->page_offset = off;
- frag->size = size;
+ skb_frag_size_set(frag, size);
}
/**
@@ -1678,7 +1701,7 @@ static inline void netdev_free_page(struct net_device *dev, struct page *page)
*/
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
- return frag->page;
+ return frag->page.p;
}
/**
@@ -1764,8 +1787,7 @@ static inline void *skb_frag_address_safe(const skb_frag_t *frag)
*/
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
- frag->page = page;
- __skb_frag_ref(frag);
+ frag->page.p = page;
}
/**
@@ -1810,7 +1832,7 @@ static inline dma_addr_t skb_frag_dma_map(struct device *dev,
* Returns true if modifying the header part of the cloned buffer
* does not requires the data to be copied.
*/
-static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
+static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
return !skb_header_cloned(skb) &&
skb_headroom(skb) + len <= skb->hdr_len;
@@ -1907,10 +1929,10 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
const struct page *page, int off)
{
if (i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
return page == skb_frag_page(frag) &&
- off == frag->page_offset + frag->size;
+ off == frag->page_offset + skb_frag_size(frag);
}
return 0;
}
@@ -2194,8 +2216,13 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
/**
* skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
*
+ * PHY drivers may accept clones of transmitted packets for
+ * timestamping via their phy_driver.txtstamp method. These drivers
+ * must call this function to return the skb back to the stack, with
+ * or without a timestamp.
+ *
 * @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps
+ * @hwtstamps: hardware time stamps, may be NULL if not available
*
*/
void skb_complete_tx_timestamp(struct sk_buff *skb,
@@ -2431,7 +2458,8 @@ static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
/* LRO sets gso_size but not gso_type, whereas if GSO is really
* wanted then gso_type will be set. */
- struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
unlikely(shinfo->gso_type == 0)) {
__skb_warn_lro_forwarding(skb);
@@ -2455,7 +2483,7 @@ static inline void skb_forward_csum(struct sk_buff *skb)
* Instead of forcing ip_summed to CHECKSUM_NONE, we can
* use this helper, to document places where we make this assertion.
*/
-static inline void skb_checksum_none_assert(struct sk_buff *skb)
+static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
BUG_ON(skb->ip_summed != CHECKSUM_NONE);
@@ -2464,5 +2492,25 @@ static inline void skb_checksum_none_assert(struct sk_buff *skb)
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+{
+ if (irqs_disabled())
+ return false;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+ return false;
+
+ if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ return false;
+
+ skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+ if (skb_end_pointer(skb) - skb->head < skb_size)
+ return false;
+
+ if (skb_shared(skb) || skb_cloned(skb))
+ return false;
+
+ return true;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
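A sketch of how a network driver's TX-completion path might combine the new skb_is_recycleable() check with skb_recycle(); my_driver_refill_rx() and the rx_buf_size parameter are hypothetical:

        static void my_driver_tx_done(struct sk_buff *skb, int rx_buf_size)
        {
                if (skb_is_recycleable(skb, rx_buf_size)) {
                        skb_recycle(skb);               /* reset and reuse for RX refill */
                        my_driver_refill_rx(skb);       /* hypothetical helper */
                } else {
                        dev_kfree_skb_any(skb);
                }
        }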
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f58d641..a32bcfd 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -36,12 +36,15 @@ enum stat_item {
ORDER_FALLBACK, /* Number of times fallback was necessary */
CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
+ CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
+	CPU_PARTIAL_FREE,	/* Used cpu partial on free */
NR_SLUB_STAT_ITEMS };
struct kmem_cache_cpu {
void **freelist; /* Pointer to next available object */
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
+ struct page *partial; /* Partially allocated frozen slabs */
int node; /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
@@ -79,6 +82,7 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int objsize; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
+ int cpu_partial; /* Number of per cpu partial objects to keep around */
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index db7bcaf..492486a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -9,6 +9,7 @@
#ifndef _LINUX_SUNRPC_CLNT_H
#define _LINUX_SUNRPC_CLNT_H
+#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -161,7 +162,7 @@ const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
size_t rpc_pton(const char *, const size_t,
struct sockaddr *, const size_t);
-char * rpc_sockaddr2uaddr(const struct sockaddr *);
+char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
size_t rpc_uaddr2sockaddr(const char *, const size_t,
struct sockaddr *, const size_t);
@@ -218,7 +219,13 @@ static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
{
const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
- return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
+
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+ return false;
+ else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ return sin1->sin6_scope_id == sin2->sin6_scope_id;
+
+ return true;
}
static inline bool __rpc_copy_addr6(struct sockaddr *dst,
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index cf14db9..e4ea430 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -44,6 +44,8 @@ RPC_I(struct inode *inode)
return container_of(inode, struct rpc_inode, vfs_inode);
}
+extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
+ char __user *, size_t);
extern int rpc_queue_upcall(struct inode *, struct rpc_pipe_msg *);
struct rpc_clnt;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 223588a..d8d5d93 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -212,11 +212,6 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
iov->iov_len += sizeof(__be32);
}
-union svc_addr_u {
- struct in_addr addr;
- struct in6_addr addr6;
-};
-
/*
* The context of a single thread, including the request currently being
* processed.
@@ -225,8 +220,12 @@ struct svc_rqst {
struct list_head rq_list; /* idle list */
struct list_head rq_all; /* all threads list */
struct svc_xprt * rq_xprt; /* transport ptr */
+
struct sockaddr_storage rq_addr; /* peer address */
size_t rq_addrlen;
+ struct sockaddr_storage rq_daddr; /* dest addr of request
+ * - reply from here */
+ size_t rq_daddrlen;
struct svc_serv * rq_server; /* RPC service definition */
struct svc_pool * rq_pool; /* thread pool */
@@ -255,9 +254,6 @@ struct svc_rqst {
unsigned short
rq_secure : 1; /* secure port */
- union svc_addr_u rq_daddr; /* dest addr of request
- * - reply from here */
-
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
void * rq_auth_data; /* flavor-specific data */
@@ -300,6 +296,21 @@ static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_addr;
}
+static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in *) &rqst->rq_daddr;
+}
+
+static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in6 *) &rqst->rq_daddr;
+}
+
+static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr *) &rqst->rq_daddr;
+}
+
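For illustration only, a service could reach the destination address of a request through the new accessors; the logging helper below is hypothetical.

static void example_log_request_daddr(const struct svc_rqst *rqstp)
{
	if (svc_daddr(rqstp)->sa_family == AF_INET) {
		const struct sockaddr_in *sin = svc_daddr_in(rqstp);

		printk(KERN_DEBUG "request was sent to %pI4\n", &sin->sin_addr);
	}
}
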
/*
* Check buffer bounds after decoding arguments
*/
@@ -340,7 +351,8 @@ struct svc_deferred_req {
struct svc_xprt *xprt;
struct sockaddr_storage addr; /* where reply must go */
size_t addrlen;
- union svc_addr_u daddr; /* where reply must come from */
+ struct sockaddr_storage daddr; /* where reply must come from */
+ size_t daddrlen;
struct cache_deferred_req handle;
size_t xprt_hlen;
int argslen;
@@ -404,7 +416,7 @@ struct svc_procedure {
struct svc_serv *svc_create(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *));
struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
- struct svc_pool *pool);
+ struct svc_pool *pool, int node);
void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
void (*shutdown)(struct svc_serv *),
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6bbcef2..57a6924 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -8,15 +8,18 @@
#include <linux/mm.h>
#include <asm/errno.h>
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+#ifdef CONFIG_VT
extern void pm_set_vt_switch(int);
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}
+#endif
+#ifdef CONFIG_VT_CONSOLE_SLEEP
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
+#else
static inline int pm_prepare_console(void)
{
return 0;
@@ -34,6 +37,58 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
+enum suspend_stat_step {
+ SUSPEND_FREEZE = 1,
+ SUSPEND_PREPARE,
+ SUSPEND_SUSPEND,
+ SUSPEND_SUSPEND_NOIRQ,
+ SUSPEND_RESUME_NOIRQ,
+ SUSPEND_RESUME
+};
+
+struct suspend_stats {
+ int success;
+ int fail;
+ int failed_freeze;
+ int failed_prepare;
+ int failed_suspend;
+ int failed_suspend_noirq;
+ int failed_resume;
+ int failed_resume_noirq;
+#define REC_FAILED_NUM 2
+ int last_failed_dev;
+ char failed_devs[REC_FAILED_NUM][40];
+ int last_failed_errno;
+ int errno[REC_FAILED_NUM];
+ int last_failed_step;
+ enum suspend_stat_step failed_steps[REC_FAILED_NUM];
+};
+
+extern struct suspend_stats suspend_stats;
+
+static inline void dpm_save_failed_dev(const char *name)
+{
+ strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ name,
+ sizeof(suspend_stats.failed_devs[0]));
+ suspend_stats.last_failed_dev++;
+ suspend_stats.last_failed_dev %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_errno(int err)
+{
+ suspend_stats.errno[suspend_stats.last_failed_errno] = err;
+ suspend_stats.last_failed_errno++;
+ suspend_stats.last_failed_errno %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_step(enum suspend_stat_step step)
+{
+ suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
+ suspend_stats.last_failed_step++;
+ suspend_stats.last_failed_step %= REC_FAILED_NUM;
+}
+
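A hedged sketch of how a suspend path might feed these counters and ring buffers when a device fails to suspend; the wrapper name is illustrative.

static void example_record_suspend_failure(struct device *dev, int error)
{
	suspend_stats.fail++;
	dpm_save_failed_dev(dev_name(dev));
	dpm_save_failed_errno(error);
	dpm_save_failed_step(SUSPEND_SUSPEND);
}
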
/**
* struct platform_suspend_ops - Callbacks for managing platform dependent
* system sleep states.
@@ -334,4 +389,38 @@ static inline void unlock_system_sleep(void)
}
#endif
+#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
+/*
+ * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
+ * to save/restore additional information to/from the array of page
+ * frame numbers in the hibernation image. For s390 this is used to
+ * save and restore the storage key for each page that is included
+ * in the hibernation image.
+ */
+unsigned long page_key_additional_pages(unsigned long pages);
+int page_key_alloc(unsigned long pages);
+void page_key_free(void);
+void page_key_read(unsigned long *pfn);
+void page_key_memorize(unsigned long *pfn);
+void page_key_write(void *address);
+
+#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
+static inline unsigned long page_key_additional_pages(unsigned long pages)
+{
+ return 0;
+}
+
+static inline int page_key_alloc(unsigned long pages)
+{
+ return 0;
+}
+
+static inline void page_key_free(void) {}
+static inline void page_key_read(unsigned long *pfn) {}
+static inline void page_key_memorize(unsigned long *pfn) {}
+static inline void page_key_write(void *address) {}
+
+#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index d7d2f21..dac0859 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -112,6 +112,7 @@ struct bin_attribute {
struct sysfs_ops {
ssize_t (*show)(struct kobject *, struct attribute *,char *);
ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
+ const void *(*namespace)(struct kobject *, const struct attribute *);
};
struct sysfs_dirent;
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
index 7a81303..4eb4902 100644
--- a/include/linux/trace_clock.h
+++ b/include/linux/trace_clock.h
@@ -15,5 +15,6 @@
extern u64 notrace trace_clock_local(void);
extern u64 notrace trace_clock(void);
extern u64 notrace trace_clock_global(void);
+extern u64 notrace trace_clock_counter(void);
#endif /* _LINUX_TRACE_CLOCK_H */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d530a44..df0a779 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -54,8 +54,18 @@ extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
void *data);
extern void tracepoint_probe_update_all(void);
+#ifdef CONFIG_MODULES
+struct tp_module {
+ struct list_head list;
+ unsigned int num_tracepoints;
+ struct tracepoint * const *tracepoints_ptrs;
+};
+#endif /* CONFIG_MODULES */
+
struct tracepoint_iter {
- struct module *module;
+#ifdef CONFIG_MODULES
+ struct tp_module *module;
+#endif /* CONFIG_MODULES */
struct tracepoint * const *tracepoint;
};
@@ -63,8 +73,6 @@ extern void tracepoint_iter_start(struct tracepoint_iter *iter);
extern void tracepoint_iter_next(struct tracepoint_iter *iter);
extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
- struct tracepoint * const *begin, struct tracepoint * const *end);
/*
* tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -78,17 +86,6 @@ static inline void tracepoint_synchronize_unregister(void)
#define PARAMS(args...) args
-#ifdef CONFIG_TRACEPOINTS
-extern
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
- struct tracepoint * const *end);
-#else
-static inline
-void tracepoint_update_probe_range(struct tracepoint * const *begin,
- struct tracepoint * const *end)
-{ }
-#endif /* CONFIG_TRACEPOINTS */
-
#endif /* _LINUX_TRACEPOINT_H */
/*
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5f2ede8..5dbb3cb 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -473,7 +473,9 @@ extern void proc_clear_tty(struct task_struct *p);
extern struct tty_struct *get_current_tty(void);
extern void tty_default_fops(struct file_operations *fops);
extern struct tty_struct *alloc_tty_struct(void);
-extern int tty_add_file(struct tty_struct *tty, struct file *file);
+extern int tty_alloc_file(struct file *file);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
+extern void tty_free_file(struct file *file);
extern void free_tty_struct(struct tty_struct *tty);
extern void initialize_tty_struct(struct tty_struct *tty,
struct tty_driver *driver, int idx);
@@ -581,6 +583,8 @@ extern int __init tty_init(void);
/* tty_ioctl.c */
extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
+extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
/* serial.c */
@@ -602,8 +606,24 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(void) __acquires(tty_lock);
extern void __lockfunc tty_unlock(void) __releases(tty_lock);
-extern struct task_struct *__big_tty_mutex_owner;
-#define tty_locked() (current == __big_tty_mutex_owner)
+
+/*
+ * This shall be called only from paths where the BTM is held (like close).
+ *
+ * We need this to ensure nobody waits for us to finish while we are waiting.
+ * Without this we were encountering system stalls.
+ *
+ * This should indeed be removed along with the BTM later.
+ *
+ * Locking: BTM required. Nobody is allowed to hold port->mutex.
+ */
+static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
+ long timeout)
+{
+ tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
+ tty_wait_until_sent(tty, timeout);
+ tty_lock();
+}
/*
* wait_event_interruptible_tty -- wait for a condition with the tty lock held
diff --git a/include/linux/types.h b/include/linux/types.h
index 176da8c..57a9723 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -238,6 +238,16 @@ struct ustat {
char f_fpack[6];
};
+/**
+ * struct rcu_head - callback structure for use with RCU
+ * @next: next update request in a list
+ * @func: actual update function to call after the grace period.
+ */
+struct rcu_head {
+ struct rcu_head *next;
+ void (*func)(struct rcu_head *head);
+};
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
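For context, rcu_head is normally embedded in the object it protects and passed to call_rcu(), which is declared in <linux/rcupdate.h> rather than here; a minimal sketch with a hypothetical struct foo:

struct foo {
	int data;
	struct rcu_head rcu;		/* storage for the reclamation callback */
};

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* after unlinking the object from all RCU-protected structures: */
/*	call_rcu(&foo->rcu, foo_free_rcu);				*/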
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index d28c726..2aa2881 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -68,7 +68,7 @@ struct uinput_device {
unsigned char head;
unsigned char tail;
struct input_event buff[UINPUT_BUFFER_SIZE];
- int ff_effects_max;
+ unsigned int ff_effects_max;
struct uinput_request *requests[UINPUT_NUM_REQUESTS];
wait_queue_head_t requests_waitq;
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 665517c..fd99ff9 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -23,7 +23,10 @@ struct uio_map;
/**
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
- * @addr: address of the device's memory
+ * @addr: address of the device's memory (phys_addr is used since
+ * addr can be logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of the
+ * address types)
* @size: size of IO
* @memtype: type of memory addr points to
* @internal_addr: ioremap-ped version of addr, for driver internal use
@@ -31,7 +34,7 @@ struct uio_map;
*/
struct uio_mem {
const char *name;
- unsigned long addr;
+ phys_addr_t addr;
unsigned long size;
int memtype;
void __iomem *internal_addr;
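As an illustration of the widened field, a hypothetical UIO driver can now store a physical address directly (struct uio_info and UIO_MEM_PHYS come from this same header):

static void example_fill_uio_mem(struct uio_info *info, struct resource *res)
{
	info->mem[0].name = "registers";
	info->mem[0].addr = res->start;		/* phys_addr_t holds any address type */
	info->mem[0].size = resource_size(res);
	info->mem[0].memtype = UIO_MEM_PHYS;
}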
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 73c7df4..6f49a1b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -292,6 +292,16 @@ struct usb_host_config {
int extralen;
};
+/* USB2.0 and USB3.0 device BOS descriptor set */
+struct usb_host_bos {
+ struct usb_bos_descriptor *desc;
+
+ /* wireless cap descriptor is handled by wusb */
+ struct usb_ext_cap_descriptor *ext_cap;
+ struct usb_ss_cap_descriptor *ss_cap;
+ struct usb_ss_container_id_descriptor *ss_id;
+};
+
int __usb_get_extra_descriptor(char *buffer, unsigned size,
unsigned char type, void **ptr);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
@@ -381,6 +391,7 @@ struct usb_tt;
* @ep0: endpoint 0 data (default control pipe)
* @dev: generic device interface
* @descriptor: USB device descriptor
+ * @bos: USB device BOS descriptor set
* @config: all of the device's configs
* @actconfig: the active configuration
* @ep_in: array of IN endpoints
@@ -399,6 +410,9 @@ struct usb_tt;
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
* @wusb: device is Wireless USB
+ * @lpm_capable: device supports LPM
+ * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+ * @usb2_hw_lpm_enabled: USB2 hardware LPM enabled
* @string_langid: language ID for strings
* @product: iProduct string, if present (static)
* @manufacturer: iManufacturer string, if present (static)
@@ -442,6 +456,7 @@ struct usb_device {
struct device dev;
struct usb_device_descriptor descriptor;
+ struct usb_host_bos *bos;
struct usb_host_config *config;
struct usb_host_config *actconfig;
@@ -460,6 +475,9 @@ struct usb_device {
unsigned authorized:1;
unsigned authenticated:1;
unsigned wusb:1;
+ unsigned lpm_capable:1;
+ unsigned usb2_hw_lpm_capable:1;
+ unsigned usb2_hw_lpm_enabled:1;
int string_langid;
/* static strings from the device */
@@ -1574,7 +1592,7 @@ usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
return 0;
/* NOTE: only 0x07ff bits are for packet size... */
- return le16_to_cpu(ep->desc.wMaxPacketSize);
+ return usb_endpoint_maxp(&ep->desc);
}
/* ----------------------------------------------------------------------- */
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 0fd3fbd..f32a64e 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -34,6 +34,7 @@
#define __LINUX_USB_CH9_H
#include <linux/types.h> /* __u8 etc */
+#include <asm/byteorder.h> /* le16_to_cpu */
/*-------------------------------------------------------------------------*/
@@ -143,6 +144,11 @@
#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */
#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00
+/*
+ * Suspend Options, Table 9-7 USB 3.0 spec
+ */
+#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
+#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
@@ -570,6 +576,17 @@ static inline int usb_endpoint_is_isoc_out(
return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd);
}
+/**
+ * usb_endpoint_maxp - get endpoint's max packet size
+ * @epd: endpoint to be checked
+ *
+ * Returns @epd's max packet size
+ */
+static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
+{
+ return le16_to_cpu(epd->wMaxPacketSize);
+}
+
/*-------------------------------------------------------------------------*/
/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
@@ -851,6 +868,18 @@ enum usb_device_speed {
USB_SPEED_SUPER, /* usb 3.0 */
};
+#ifdef __KERNEL__
+
+/**
+ * usb_speed_string() - Returns the human-readable name of the speed.
+ * @speed: The speed to return the human-readable name for. If it is not
+ * one of the speeds defined in enum usb_device_speed, the string for
+ * USB_SPEED_UNKNOWN is returned.
+ */
+extern const char *usb_speed_string(enum usb_device_speed speed);
+
+#endif
+
enum usb_device_state {
/* NOTATTACHED isn't in the USB spec, and this state acts
* the same as ATTACHED ... but it's clearer this way.
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 087f4b9..1d3a675 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -437,9 +437,9 @@ static inline void usb_ep_fifo_flush(struct usb_ep *ep)
struct usb_dcd_config_params {
__u8 bU1devExitLat; /* U1 Device exit Latency */
-#define USB_DEFULT_U1_DEV_EXIT_LAT 0x01 /* Less then 1 microsec */
+#define USB_DEFAULT_U1_DEV_EXIT_LAT 0x01 /* Less than 1 microsec */
__le16 bU2DevExitLat; /* U2 Device exit Latency */
-#define USB_DEFULT_U2_DEV_EXIT_LAT 0x1F4 /* Less then 500 microsec */
+#define USB_DEFAULT_U2_DEV_EXIT_LAT 0x1F4 /* Less than 500 microsec */
};
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 0097136..03354d5 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -178,7 +178,7 @@ struct usb_hcd {
* this structure.
*/
unsigned long hcd_priv[0]
- __attribute__ ((aligned(sizeof(unsigned long))));
+ __attribute__ ((aligned(sizeof(s64))));
};
/* 2.4 does this a bit differently ... */
@@ -343,6 +343,7 @@ struct hc_driver {
* address is set
*/
int (*update_device)(struct usb_hcd *, struct usb_device *);
+ int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
index b6b8660..55805f9 100644
--- a/include/linux/usb/r8a66597.h
+++ b/include/linux/usb/r8a66597.h
@@ -48,6 +48,9 @@ struct r8a66597_platdata {
/* (external controller only) set one = WR0_N shorted to WR1_N */
unsigned wr0_shorted_to_wr1:1;
+
+ /* set one = using SUDMAC */
+ unsigned sudmac:1;
};
/* Register definitions */
@@ -417,5 +420,62 @@ struct r8a66597_platdata {
#define USBSPD 0x00C0
#define RTPORT 0x0001
+/* SUDMAC registers */
+#define CH0CFG 0x00
+#define CH1CFG 0x04
+#define CH0BA 0x10
+#define CH1BA 0x14
+#define CH0BBC 0x18
+#define CH1BBC 0x1C
+#define CH0CA 0x20
+#define CH1CA 0x24
+#define CH0CBC 0x28
+#define CH1CBC 0x2C
+#define CH0DEN 0x30
+#define CH1DEN 0x34
+#define DSTSCLR 0x38
+#define DBUFCTRL 0x3C
+#define DINTCTRL 0x40
+#define DINTSTS 0x44
+#define DINTSTSCLR 0x48
+#define CH0SHCTRL 0x50
+#define CH1SHCTRL 0x54
+
+/* SUDMAC Configuration Registers */
+#define SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */
+#define RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */
+#define LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */
+
+/* DMA Enable Registers */
+#define DEN 0x0001 /* b1: DMA Transfer Enable */
+
+/* DMA Status Clear Register */
+#define CH1STCLR 0x0002 /* b2: Ch1 DMA Status Clear */
+#define CH0STCLR 0x0001 /* b1: Ch0 DMA Status Clear */
+
+/* DMA Buffer Control Register */
+#define CH1BUFW 0x0200 /* b9: Ch1 DMA Buffer Data Transfer Enable */
+#define CH0BUFW 0x0100 /* b8: Ch0 DMA Buffer Data Transfer Enable */
+#define CH1BUFS 0x0002 /* b2: Ch1 DMA Buffer Data Status */
+#define CH0BUFS 0x0001 /* b1: Ch0 DMA Buffer Data Status */
+
+/* DMA Interrupt Control Register */
+#define CH1ERRE 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Enable */
+#define CH0ERRE 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Enable */
+#define CH1ENDE 0x0002 /* b2: Ch1 DMA Transfer End Int Enable */
+#define CH0ENDE 0x0001 /* b1: Ch0 DMA Transfer End Int Enable */
+
+/* DMA Interrupt Status Register */
+#define CH1ERRS 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Status */
+#define CH0ERRS 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Status */
+#define CH1ENDS 0x0002 /* b2: Ch1 DMA Transfer End Int Status */
+#define CH0ENDS 0x0001 /* b1: Ch0 DMA Transfer End Int Status */
+
+/* DMA Interrupt Status Clear Register */
+#define CH1ERRC 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Stat Clear */
+#define CH0ERRC 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Stat Clear */
+#define CH1ENDC 0x0002 /* b2: Ch1 DMA Transfer End Int Stat Clear */
+#define CH0ENDC 0x0001 /* b1: Ch0 DMA Transfer End Int Stat Clear */
+
#endif /* __LINUX_USB_R8A66597_H */
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 8977431..e5a40c3 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -82,6 +82,13 @@ struct renesas_usbhs_platform_callback {
* get VBUS status function.
*/
int (*get_vbus)(struct platform_device *pdev);
+
+ /*
+ * option:
+ *
+ * VBUS control is needed for Host
+ */
+ int (*set_vbus)(struct platform_device *pdev, int enable);
};
/*
@@ -101,6 +108,8 @@ struct renesas_usbhs_driver_param {
* option:
*
* for BUSWAIT :: BWAIT
+ * see
+ * renesas_usbhs/common.c :: usbhsc_set_buswait()
* */
int buswait_bwait;
@@ -127,6 +136,11 @@ struct renesas_usbhs_driver_param {
* pio <--> dma border.
*/
int pio_dma_border; /* default is 64byte */
+
+ /*
+ * option:
+ */
+ u32 has_otg:1; /* for controlling PWEN/EXTLP */
};
/*
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 7108857..851ebf1 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -61,6 +61,9 @@ struct virtqueue {
* virtqueue_detach_unused_buf: detach first unused buffer
* vq: the struct virtqueue we're talking about.
* Returns NULL or the "data" token handed to add_buf
+ * virtqueue_get_vring_size: return the size of the virtqueue's vring
+ * vq: the struct virtqueue containing the vring of interest.
+ * Returns the size of the vring.
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously, with the exception
@@ -97,6 +100,8 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+
/**
* virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index aed54c5..e5d1220 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -30,6 +30,9 @@
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
/* Security namespace */
+#define XATTR_EVM_SUFFIX "evm"
+#define XATTR_NAME_EVM XATTR_SECURITY_PREFIX XATTR_EVM_SUFFIX
+
#define XATTR_SELINUX_SUFFIX "selinux"
#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
@@ -49,6 +52,11 @@
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
+#define XATTR_POSIX_ACL_ACCESS "posix_acl_access"
+#define XATTR_NAME_POSIX_ACL_ACCESS XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_ACCESS
+#define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
+#define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
+
#ifdef __KERNEL__
#include <linux/types.h>
@@ -67,6 +75,12 @@ struct xattr_handler {
size_t size, int flags, int handler_flags);
};
+struct xattr {
+ char *name;
+ void *value;
+ size_t value_len;
+};
+
ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
@@ -78,7 +92,10 @@ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer,
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
-
+ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_XATTR_H */
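A hedged sketch of the allocating variant added above, patterned on EVM-style callers; it assumes the helper kmallocs the buffer so kfree() releases it.

static ssize_t example_read_evm_xattr(struct dentry *dentry)
{
	char *value = NULL;
	ssize_t len;

	len = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, &value, 0, GFP_NOFS);
	if (len < 0)
		return len;
	/* ... verify the len bytes stored in value ... */
	kfree(value);
	return 0;
}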
diff --git a/include/media/pwc-ioctl.h b/include/media/pwc-ioctl.h
index 0f19779..1ed1e61 100644
--- a/include/media/pwc-ioctl.h
+++ b/include/media/pwc-ioctl.h
@@ -53,7 +53,6 @@
*/
#include <linux/types.h>
-#include <linux/version.h>
/* Enumeration of image sizes */
#define PSZ_SQCIF 0x00
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
index 1c647e8..d8fb601 100644
--- a/include/media/videobuf-dma-sg.h
+++ b/include/media/videobuf-dma-sg.h
@@ -34,7 +34,7 @@
* does memory allocation too using vmalloc_32().
*
* videobuf_dma_*()
- * see Documentation/PCI/PCI-DMA-mapping.txt, these functions to
+ * see Documentation/DMA-API-HOWTO.txt, these functions do
* basically the same. The map function also builds a
* scatterlist for the buffer (and unmap frees it ...)
*
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index a6326ef..2d70b95 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -76,11 +76,8 @@ do { \
} \
} while (0)
-#define P9_DUMP_PKT(way, pdu) p9pdu_dump(way, pdu)
-
#else
#define P9_DPRINTK(level, format, arg...) do { } while (0)
-#define P9_DUMP_PKT(way, pdu) do { } while (0)
#endif
@@ -359,6 +356,9 @@ enum p9_qid_t {
/* Room for readdir header */
#define P9_READDIRHDRSZ 24
+/* size of header for zero copy read/write */
+#define P9_ZC_HDR_SZ 4096
+
/**
* struct p9_qid - file system entity information
* @type: 8-bit type &p9_qid_t
@@ -555,10 +555,6 @@ struct p9_rstatfs {
* @tag: transaction id of the request
* @offset: used by marshalling routines to track current position in buffer
* @capacity: used by marshalling routines to track total malloc'd capacity
- * @pubuf: Payload user buffer given by the caller
- * @pkbuf: Payload kernel buffer given by the caller
- * @pbuf_size: pubuf/pkbuf(only one will be !NULL) size to be read/write.
- * @private: For transport layer's use.
* @sdata: payload
*
* &p9_fcall represents the structure for all 9P RPC
@@ -575,10 +571,6 @@ struct p9_fcall {
size_t offset;
size_t capacity;
- char __user *pubuf;
- char *pkbuf;
- size_t pbuf_size;
- void *private;
u8 *sdata;
};
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 55ce72c..fc9b90b 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -151,7 +151,7 @@ struct p9_req_t {
struct p9_client {
spinlock_t lock; /* protect client structure */
- int msize;
+ unsigned int msize;
unsigned char proto_version;
struct p9_trans_module *trans_mod;
enum p9_trans_status status;
@@ -240,8 +240,8 @@ int p9_client_read(struct p9_fid *fid, char *data, char __user *udata,
int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version);
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent);
struct p9_wstat *p9_client_stat(struct p9_fid *fid);
int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst);
int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *attr);
@@ -259,7 +259,7 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
-int p9stat_read(char *, int, struct p9_wstat *, int);
+int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
void p9stat_free(struct p9_wstat *);
int p9_is_proto_dotu(struct p9_client *clnt);
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 83531eb..adcbb20 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -26,13 +26,6 @@
#ifndef NET_9P_TRANSPORT_H
#define NET_9P_TRANSPORT_H
-#define P9_TRANS_PREF_PAYLOAD_MASK 0x1
-
-/* Default. Add Payload to PDU before sending it down to transport layer */
-#define P9_TRANS_PREF_PAYLOAD_DEF 0x0
-/* Send pay load separately to transport layer along with PDU.*/
-#define P9_TRANS_PREF_PAYLOAD_SEP 0x1
-
/**
* struct p9_trans_module - transport module interface
* @list: used to maintain a list of currently available transports
@@ -56,13 +49,14 @@ struct p9_trans_module {
struct list_head list;
char *name; /* name of transport */
int maxsize; /* max message size of transport */
- int pref; /* Preferences of this transport */
int def; /* this transport should be default */
struct module *owner;
int (*create)(struct p9_client *, const char *, char *);
void (*close) (struct p9_client *);
int (*request) (struct p9_client *, struct p9_req_t *req);
int (*cancel) (struct p9_client *, struct p9_req_t *req);
+ int (*zc_request)(struct p9_client *, struct p9_req_t *,
+ char *, char *, int , int, int, int);
};
void v9fs_register_trans(struct p9_trans_module *m);
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index c5dedd8..8d55251 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -52,8 +52,9 @@ struct cfhsi_desc {
/*
* Maximum bytes transferred in one transfer.
*/
-/* TODO: 4096 is temporary... */
-#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * 4096)
+#define CFHSI_MAX_CAIF_FRAME_SZ 4096
+
+#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ)
/* Size of the complete HSI TX buffer. */
#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ)
@@ -75,18 +76,21 @@ struct cfhsi_desc {
#define CFHSI_WAKE_UP_ACK 1
#define CFHSI_WAKE_DOWN_ACK 2
#define CFHSI_AWAKE 3
-#define CFHSI_PENDING_RX 4
-#define CFHSI_SHUTDOWN 6
-#define CFHSI_FLUSH_FIFO 7
+#define CFHSI_WAKELOCK_HELD 4
+#define CFHSI_SHUTDOWN 5
+#define CFHSI_FLUSH_FIFO 6
#ifndef CFHSI_INACTIVITY_TOUT
#define CFHSI_INACTIVITY_TOUT (1 * HZ)
#endif /* CFHSI_INACTIVITY_TOUT */
-#ifndef CFHSI_WAKEUP_TOUT
-#define CFHSI_WAKEUP_TOUT (3 * HZ)
-#endif /* CFHSI_WAKEUP_TOUT */
+#ifndef CFHSI_WAKE_TOUT
+#define CFHSI_WAKE_TOUT (3 * HZ)
+#endif /* CFHSI_WAKE_TOUT */
+#ifndef CFHSI_MAX_RX_RETRIES
+#define CFHSI_MAX_RX_RETRIES (10 * HZ)
+#endif
/* Structure implemented by the CAIF HSI driver. */
struct cfhsi_drv {
@@ -104,11 +108,21 @@ struct cfhsi_dev {
int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev);
int (*cfhsi_wake_up) (struct cfhsi_dev *dev);
int (*cfhsi_wake_down) (struct cfhsi_dev *dev);
+ int (*cfhsi_get_peer_wake) (struct cfhsi_dev *dev, bool *status);
int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy);
int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev);
struct cfhsi_drv *drv;
};
+/* Structure holds status of received CAIF frames processing */
+struct cfhsi_rx_state {
+ int state;
+ int nfrms;
+ int pld_len;
+ int retries;
+ bool piggy_desc;
+};
+
/* Structure implemented by CAIF HSI drivers. */
struct cfhsi {
struct caif_dev_common cfdev;
@@ -118,7 +132,8 @@ struct cfhsi {
struct cfhsi_drv drv;
struct cfhsi_dev *dev;
int tx_state;
- int rx_state;
+ struct cfhsi_rx_state rx_state;
+ unsigned long inactivity_timeout;
int rx_len;
u8 *rx_ptr;
u8 *tx_buf;
@@ -130,13 +145,13 @@ struct cfhsi {
struct list_head list;
struct work_struct wake_up_work;
struct work_struct wake_down_work;
- struct work_struct rx_done_work;
- struct work_struct tx_done_work;
+ struct work_struct out_of_sync_work;
struct workqueue_struct *wq;
wait_queue_head_t wake_up_wait;
wait_queue_head_t wake_down_wait;
wait_queue_head_t flush_fifo_wait;
struct timer_list timer;
+ struct timer_list rx_slowpath_timer;
unsigned long bits;
};
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 2fa8d13..2fa1469 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -30,6 +30,14 @@ static inline int INET_ECN_is_capable(__u8 dsfield)
return dsfield & INET_ECN_ECT_0;
}
+/*
+ * RFC 3168 9.1.1
+ * The full-functionality option for ECN encapsulation is to copy the
+ * ECN codepoint of the inside header to the outside header on
+ * encapsulation if the inside header is not-ECT or ECT, and to set the
+ * ECN codepoint of the outside header to ECT(0) if the ECN codepoint of
+ * the inside header is CE.
+ */
static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
{
outer &= ~INET_ECN_MASK;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index f1a7709..180231c 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -126,7 +126,8 @@ struct inet_timewait_sock {
/* And these are ours. */
unsigned int tw_ipv6only : 1,
tw_transparent : 1,
- tw_pad : 14, /* 14 bits hole */
+ tw_pad : 6, /* 6 bits hole */
+ tw_tos : 8,
tw_ipv6_offset : 16;
kmemcheck_bitfield_end(flags);
unsigned long tw_ttd;
diff --git a/include/net/ip.h b/include/net/ip.h
index aa76c7a..eca0ef7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -165,6 +165,7 @@ struct ip_reply_arg {
int csumoffset; /* u16 offset of csum in iov[0].iov_base */
/* -1 if not needed */
int bound_dev_if;
+ u8 tos;
};
#define IP_REPLY_ARG_NOSRCCHECK 1
@@ -175,7 +176,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
}
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- struct ip_reply_arg *arg, unsigned int len);
+ const struct ip_reply_arg *arg, unsigned int len);
struct ipv4_config {
int log_martians;
@@ -406,9 +407,18 @@ enum ip_defrag_users {
IP_DEFRAG_VS_OUT,
IP_DEFRAG_VS_FWD,
IP_DEFRAG_AF_PACKET,
+ IP_DEFRAG_MACVLAN,
};
int ip_defrag(struct sk_buff *skb, u32 user);
+#ifdef CONFIG_INET
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+#else
+static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+ return skb;
+}
+#endif
int ip_frag_mem(struct net *net);
int ip_frag_nqueues(struct net *net);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 1aaf915..8fa4430 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -900,6 +900,7 @@ struct netns_ipvs {
volatile int sync_state;
volatile int master_syncid;
volatile int backup_syncid;
+ struct mutex sync_mutex;
/* multicast interface name */
char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4fc88f3..2eb207e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -46,14 +46,14 @@ struct qdisc_size_table {
struct Qdisc {
int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
struct sk_buff * (*dequeue)(struct Qdisc *dev);
- unsigned flags;
+ unsigned int flags;
#define TCQ_F_BUILTIN 1
#define TCQ_F_INGRESS 2
#define TCQ_F_CAN_BYPASS 4
#define TCQ_F_MQROOT 8
#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
- struct Qdisc_ops *ops;
+ const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
@@ -224,7 +224,7 @@ struct qdisc_skb_cb {
long data[];
};
-static inline int qdisc_qlen(struct Qdisc *q)
+static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
@@ -239,12 +239,12 @@ static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
return &qdisc->q.lock;
}
-static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc;
}
-static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;
}
@@ -260,7 +260,7 @@ static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
* root. This is enforced by holding the RTNL semaphore, which
* all users of this lock accessor must do.
*/
-static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
@@ -268,7 +268,7 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
+static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root_sleeping(qdisc);
@@ -276,17 +276,17 @@ static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
return qdisc_lock(root);
}
-static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
}
-static inline void sch_tree_lock(struct Qdisc *q)
+static inline void sch_tree_lock(const struct Qdisc *q)
{
spin_lock_bh(qdisc_root_sleeping_lock(q));
}
-static inline void sch_tree_unlock(struct Qdisc *q)
+static inline void sch_tree_unlock(const struct Qdisc *q)
{
spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
@@ -319,7 +319,7 @@ static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
}
static inline struct Qdisc_class_common *
-qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
+qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
struct Qdisc_class_common *cl;
struct hlist_node *n;
@@ -393,7 +393,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
}
/* Are any of the TX qdiscs changing? */
-static inline bool qdisc_tx_changing(struct net_device *dev)
+static inline bool qdisc_tx_changing(const struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index d97f689..c2e542b 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -10,7 +10,7 @@ extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport);
extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0113d30..e147f42 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -18,7 +18,6 @@
#ifndef _TCP_H
#define _TCP_H
-#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1
#include <linux/list.h>
@@ -327,9 +326,9 @@ extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
+ const struct tcphdr *th, unsigned int len);
extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len);
+ const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -401,10 +400,10 @@ extern void tcp_set_keepalive(struct sock *sk, int val);
extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(struct sk_buff *skb,
- struct tcp_options_received *opt_rx, u8 **hvpp,
+extern void tcp_parse_options(const struct sk_buff *skb,
+ struct tcp_options_received *opt_rx, const u8 **hvpp,
int estab);
-extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
+extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
* TCP v4 functions exported for the inet6 API
@@ -450,7 +449,7 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
/* From net/ipv6/syncookies.c */
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
-extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
+extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
#else
static inline __u32 cookie_v6_init_sequence(struct sock *sk,
@@ -522,7 +521,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}
/* tcp.c */
-extern void tcp_get_info(struct sock *, struct tcp_info *);
+extern void tcp_get_info(const struct sock *, struct tcp_info *);
/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -532,8 +531,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
extern void tcp_initialize_rcv_mss(struct sock *sk);
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
+extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
+extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
@@ -574,7 +573,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
u32 rto_min = TCP_RTO_MIN;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
@@ -820,6 +819,7 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+
if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
return tp->snd_ssthresh;
else
@@ -832,7 +832,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
+extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
/* Slow start with delack produces 3 packets of burst, so that
* it is safe "de facto". This will be the default - same as
@@ -861,7 +861,7 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
static inline void tcp_check_probe_timer(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out && !icsk->icsk_pending)
@@ -1184,8 +1184,9 @@ struct tcp_md5sig_pool {
/* - functions */
extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk);
extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
@@ -1208,11 +1209,11 @@ extern void tcp_free_md5sig_pool(void);
extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
extern void tcp_put_md5sig_pool(void);
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
+extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
unsigned header_len);
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
- struct tcp_md5sig_key *key);
+ const struct tcp_md5sig_key *key);
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
@@ -1225,22 +1226,24 @@ static inline void tcp_write_queue_purge(struct sock *sk)
tcp_clear_all_retrans_hints(tcp_sk(sk));
}
-static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
return skb_peek_tail(&sk->sk_write_queue);
}
-static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_next(&sk->sk_write_queue, skb);
}
-static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
+ const struct sk_buff *skb)
{
return skb_queue_prev(&sk->sk_write_queue, skb);
}
@@ -1254,7 +1257,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
-static inline struct sk_buff *tcp_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
return sk->sk_send_head;
}
@@ -1265,7 +1268,7 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
-static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_is_last(sk, skb))
sk->sk_send_head = NULL;
@@ -1445,9 +1448,9 @@ struct tcp_sock_af_ops {
struct sock *addr_sk);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
int (*md5_add) (struct sock *sk,
struct sock *addr_sk,
u8 *newkey,
@@ -1464,9 +1467,9 @@ struct tcp_request_sock_ops {
struct request_sock *req);
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
- struct sock *sk,
- struct request_sock *req,
- struct sk_buff *skb);
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb);
#endif
};
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 673a024..5f097ca 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
return 0;
}
-static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh)
+/* Slow-path computation of checksum. Socket is locked. */
+static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
{
+ const struct udp_sock *up = udp_sk(skb->sk);
int cscov = up->len;
+ __wsum csum = 0;
- /*
- * Sender has set `partial coverage' option on UDP-Lite socket
- */
- if (up->pcflag & UDPLITE_SEND_CC) {
+ if (up->pcflag & UDPLITE_SEND_CC) {
+ /*
+ * Sender has set `partial coverage' option on UDP-Lite socket.
+ * The special case "up->pcslen == 0" signifies full coverage.
+ */
if (up->pcslen < up->len) {
- /* up->pcslen == 0 means that full coverage is required,
- * partial coverage only if 0 < up->pcslen < up->len */
- if (0 < up->pcslen) {
- cscov = up->pcslen;
- }
- uh->len = htons(up->pcslen);
+ if (0 < up->pcslen)
+ cscov = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
}
- /*
- * NOTE: Causes for the error case `up->pcslen > up->len':
- * (i) Application error (will not be penalized).
- * (ii) Payload too big for send buffer: data is split
- * into several packets, each with its own header.
- * In this case (e.g. last segment), coverage may
- * exceed packet length.
- * Since packets with coverage length > packet length are
- * illegal, we fall back to the defaults here.
- */
+ /*
+ * NOTE: Causes for the error case `up->pcslen > up->len':
+ * (i) Application error (will not be penalized).
+ * (ii) Payload too big for send buffer: data is split
+ * into several packets, each with its own header.
+ * In this case (e.g. last segment), coverage may
+ * exceed packet length.
+ * Since packets with coverage length > packet length are
+ * illegal, we fall back to the defaults here.
+ */
}
- return cscov;
-}
-
-static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
-{
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
- __wsum csum = 0;
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
return csum;
}
+/* Fast-path computation of checksum. Socket may not be locked. */
static inline __wsum udplite_csum(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb));
+ const struct udp_sock *up = udp_sk(skb->sk);
const int off = skb_transport_offset(skb);
- const int len = skb->len - off;
+ int len = skb->len - off;
+ if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) {
+ if (0 < up->pcslen)
+ len = up->pcslen;
+ udp_hdr(skb)->len = htons(up->pcslen);
+ }
skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */
- return skb_checksum(skb, off, min(cscov, len), 0);
+ return skb_checksum(skb, off, len, 0);
}
extern void udplite4_register(void);
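For reference, the partial-coverage path above is enabled from user space; a hedged sketch using the UDPLITE_SEND_CSCOV socket option (value 10 in this header; glibc may not define it). A coverage of 20 checksums the 8-byte header plus the first 12 payload bytes.

	int cov = 20;	/* fd is an IPPROTO_UDPLITE socket */

	if (setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		       &cov, sizeof(cov)) < 0)
		perror("UDPLITE_SEND_CSCOV");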
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
index c5c5e00..f05fa82 100644
--- a/include/scsi/osd_ore.h
+++ b/include/scsi/osd_ore.h
@@ -34,15 +34,30 @@ struct ore_comp {
struct ore_layout {
/* Our way of looking at the data_map */
+ enum pnfs_osd_raid_algorithm4
+ raid_algorithm;
unsigned stripe_unit;
unsigned mirrors_p1;
unsigned group_width;
+ unsigned parity;
u64 group_depth;
unsigned group_count;
+
+ /* Cached often needed calculations filled in by
+ * ore_verify_layout
+ */
+ unsigned long max_io_length; /* Max length that should be passed to
+ * ore_get_rw_state
+ */
+};
+
+struct ore_dev {
+ struct osd_dev *od;
};
struct ore_components {
+ unsigned first_dev; /* First logical device no */
unsigned numdevs; /* Num of devices in array */
/* If @single_comp == EC_SINGLE_COMP, @comps points to a single
* component. else there are @numdevs components
@@ -51,20 +66,60 @@ struct ore_components {
EC_SINGLE_COMP = 0, EC_MULTPLE_COMPS = 0xffffffff
} single_comp;
struct ore_comp *comps;
- struct osd_dev **ods; /* osd_dev array */
+
+ /* Array of pointers to ore_dev-*. The user will usually have these
+ * pointing to a bigger struct which contains an "ore_dev ored" member,
+ * and use container_of(oc->ods[i], struct foo_dev, ored) to access the
+ * bigger structure.
+ */
+ struct ore_dev **ods;
+};
+
+/* ore_comp_dev receives a logical device index */
+static inline struct osd_dev *ore_comp_dev(
+ const struct ore_components *oc, unsigned i)
+{
+ BUG_ON((i < oc->first_dev) || (oc->first_dev + oc->numdevs <= i));
+ return oc->ods[i - oc->first_dev]->od;
+}
+
+static inline void ore_comp_set_dev(
+ struct ore_components *oc, unsigned i, struct osd_dev *od)
+{
+ oc->ods[i - oc->first_dev]->od = od;
+}
+
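The container_of() pattern the comment above describes, sketched with a hypothetical struct foo_dev:

struct foo_dev {
	struct ore_dev ored;	/* the member oc->ods[] points at */
	/* ... user-private fields ... */
};

static struct foo_dev *example_foo_dev(struct ore_components *oc, unsigned i)
{
	return container_of(oc->ods[i - oc->first_dev], struct foo_dev, ored);
}
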
+struct ore_striping_info {
+ u64 offset;
+ u64 obj_offset;
+ u64 length;
+ u64 first_stripe_start; /* only used in raid writes */
+ u64 M; /* for truncate */
+ unsigned bytes_in_stripe;
+ unsigned dev;
+ unsigned par_dev;
+ unsigned unit_off;
+ unsigned cur_pg;
+ unsigned cur_comp;
};
struct ore_io_state;
typedef void (*ore_io_done_fn)(struct ore_io_state *ios, void *private);
+struct _ore_r4w_op {
+ /* The @priv given here is passed as ios->private */
+ struct page * (*get_page)(void *priv, u64 page_index, bool *uptodate);
+ void (*put_page)(void *priv, struct page *page);
+};
struct ore_io_state {
struct kref kref;
+ struct ore_striping_info si;
void *private;
ore_io_done_fn done;
struct ore_layout *layout;
- struct ore_components *comps;
+ struct ore_components *oc;
/* Global read/write IO*/
loff_t offset;
@@ -84,6 +139,16 @@ struct ore_io_state {
bool reading;
+ /* House keeping of Parity pages */
+ bool extra_part_alloc;
+ struct page **parity_pages;
+ unsigned max_par_pages;
+ unsigned cur_par_page;
+ unsigned sgs_per_dev;
+ struct __stripe_pages_2d *sp2d;
+ struct ore_io_state *ios_read_4_write;
+ const struct _ore_r4w_op *r4w;
+
/* Variable array of size numdevs */
unsigned numdevs;
struct ore_per_dev_state {
@@ -91,7 +156,10 @@ struct ore_io_state {
struct bio *bio;
loff_t offset;
unsigned length;
+ unsigned last_sgs_total;
unsigned dev;
+ struct osd_sg_entry *sglist;
+ unsigned cur_sg;
} per_dev[];
};
@@ -102,6 +170,9 @@ static inline unsigned ore_io_state_size(unsigned numdevs)
}
/* ore.c */
+int ore_verify_layout(unsigned total_comps, struct ore_layout *layout);
+void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
+ u64 length, struct ore_striping_info *si);
int ore_get_rw_state(struct ore_layout *layout, struct ore_components *comps,
bool is_reading, u64 offset, u64 length,
struct ore_io_state **ios);
@@ -109,7 +180,10 @@ int ore_get_io_state(struct ore_layout *layout, struct ore_components *comps,
struct ore_io_state **ios);
void ore_put_io_state(struct ore_io_state *ios);
-int ore_check_io(struct ore_io_state *ios, u64 *resid);
+typedef void (*ore_on_dev_error)(struct ore_io_state *ios, struct ore_dev *od,
+ unsigned dev_index, enum osd_err_priority oep,
+ u64 dev_offset, u64 dev_len);
+int ore_check_io(struct ore_io_state *ios, ore_on_dev_error rep);
int ore_create(struct ore_io_state *ios);
int ore_remove(struct ore_io_state *ios);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 57e71fa..54cb079 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -29,7 +29,7 @@
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/bitops.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -373,7 +373,7 @@ struct snd_pcm_substream {
int number;
char name[32]; /* substream name */
int stream; /* stream (direction) */
- struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */
+ struct pm_qos_request latency_pm_qos_req; /* pm_qos request */
size_t buffer_bytes_max; /* limit ring buffer size */
struct snd_dma_buffer dma_buffer;
unsigned int dma_buf_id;
diff --git a/include/target/configfs_macros.h b/include/target/configfs_macros.h
index 7fe7460..a0fc85b 100644
--- a/include/target/configfs_macros.h
+++ b/include/target/configfs_macros.h
@@ -30,8 +30,8 @@
* Added CONFIGFS_EATTR() macros from original configfs.h macros
* Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
- * Please read Documentation/filesystems/configfs.txt before using the
- * configfs interface, ESPECIALLY the parts about reference counts and
+ * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * the configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 2704065..35aa786f 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -10,10 +10,7 @@
#include <net/tcp.h>
#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
-#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
-/* Used by transport_generic_allocate_iovecs() */
-#define TRANSPORT_IOV_DATA_BUFFER 5
/* Maximum Number of LUNs per Target Portal Group */
/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
@@ -75,32 +72,26 @@ enum transport_tpg_type_table {
};
/* Used for generate timer flags */
-enum timer_flags_table {
- TF_RUNNING = 0x01,
- TF_STOP = 0x02,
+enum se_task_flags {
+ TF_ACTIVE = (1 << 0),
+ TF_SENT = (1 << 1),
+ TF_REQUEST_STOP = (1 << 2),
};
/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
TRANSPORT_NO_STATE = 0,
TRANSPORT_NEW_CMD = 1,
- TRANSPORT_DEFERRED_CMD = 2,
TRANSPORT_WRITE_PENDING = 3,
TRANSPORT_PROCESS_WRITE = 4,
TRANSPORT_PROCESSING = 5,
- TRANSPORT_COMPLETE_OK = 6,
- TRANSPORT_COMPLETE_FAILURE = 7,
- TRANSPORT_COMPLETE_TIMEOUT = 8,
+ TRANSPORT_COMPLETE = 6,
TRANSPORT_PROCESS_TMR = 9,
- TRANSPORT_TMR_COMPLETE = 10,
TRANSPORT_ISTATE_PROCESSING = 11,
- TRANSPORT_ISTATE_PROCESSED = 12,
- TRANSPORT_KILL = 13,
- TRANSPORT_REMOVE = 14,
- TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
TRANSPORT_COMPLETE_QF_WP = 18,
+ TRANSPORT_COMPLETE_QF_OK = 19,
};
/* Used for struct se_cmd->se_cmd_flags */
@@ -125,7 +116,6 @@ enum se_cmd_flags_table {
SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
- SCF_EMULATE_QUEUE_FULL = 0x02000000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -401,34 +391,22 @@ struct se_queue_obj {
} ____cacheline_aligned;
struct se_task {
- unsigned char task_sense;
- struct scatterlist *task_sg;
- u32 task_sg_nents;
- struct scatterlist *task_sg_bidi;
- u8 task_scsi_status;
- u8 task_flags;
- int task_error_status;
- int task_state_flags;
- bool task_padded_sg;
unsigned long long task_lba;
- u32 task_no;
- u32 task_sectors;
- u32 task_size;
+ u32 task_sectors;
+ u32 task_size;
+ struct se_cmd *task_se_cmd;
+ struct scatterlist *task_sg;
+ u32 task_sg_nents;
+ u16 task_flags;
+ u8 task_sense;
+ u8 task_scsi_status;
+ int task_error_status;
enum dma_data_direction task_data_direction;
- struct se_cmd *task_se_cmd;
- struct se_device *se_dev;
+ atomic_t task_state_active;
+ struct list_head t_list;
+ struct list_head t_execute_list;
+ struct list_head t_state_list;
struct completion task_stop_comp;
- atomic_t task_active;
- atomic_t task_execute_queue;
- atomic_t task_timeout;
- atomic_t task_sent;
- atomic_t task_stop;
- atomic_t task_state_active;
- struct timer_list task_timer;
- struct se_device *se_obj_ptr;
- struct list_head t_list;
- struct list_head t_execute_list;
- struct list_head t_state_list;
} ____cacheline_aligned;
struct se_cmd {
@@ -446,8 +424,6 @@ struct se_cmd {
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport protocol dependent state for out of order CmdSNs */
- int deferred_t_state;
/* Transport specific error status */
int transport_error_status;
/* See se_cmd_flags_table */
@@ -461,7 +437,6 @@ struct se_cmd {
u32 orig_fe_lun;
/* Persistent Reservation key */
u64 pr_res_key;
- atomic_t transport_sent;
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
@@ -479,10 +454,7 @@ struct se_cmd {
struct list_head se_queue_node;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
- void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
- void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
void (*transport_complete_callback)(struct se_cmd *);
- int (*transport_qf_callback)(struct se_cmd *);
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
@@ -495,7 +467,6 @@ struct se_cmd {
atomic_t t_se_count;
atomic_t t_task_cdbs_left;
atomic_t t_task_cdbs_ex_left;
- atomic_t t_task_cdbs_timeout_left;
atomic_t t_task_cdbs_sent;
atomic_t t_transport_aborted;
atomic_t t_transport_active;
@@ -503,7 +474,6 @@ struct se_cmd {
atomic_t t_transport_queue_active;
atomic_t t_transport_sent;
atomic_t t_transport_stop;
- atomic_t t_transport_timeout;
atomic_t transport_dev_active;
atomic_t transport_lun_active;
atomic_t transport_lun_fe_stop;
@@ -514,6 +484,8 @@ struct se_cmd {
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;
+ struct work_struct work;
+
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
@@ -670,7 +642,6 @@ struct se_dev_attrib {
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
- u32 task_timeout;
u32 max_unmap_lba_count;
u32 max_unmap_block_desc_count;
u32 unmap_granularity;
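In struct se_task the set of per-task atomics (task_active, task_sent, task_stop, ...) collapses into the single task_flags word driven by the new enum se_task_flags. A sketch of the resulting pattern, assuming the flags remain serialized by the owning command's state lock (the t_state_lock name is my reading of struct se_cmd and should be treated as illustrative):

spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags |= (TF_ACTIVE | TF_SENT);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);

/* Later: plain bit tests replace atomic_read()/atomic_set() pairs. */
spin_lock_irqsave(&cmd->t_state_lock, flags);
if (task->task_flags & TF_REQUEST_STOP)
	task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);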
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
index bd55968..d5876e1 100644
--- a/include/target/target_core_tmr.h
+++ b/include/target/target_core_tmr.h
@@ -27,7 +27,7 @@ enum tcm_tmrsp_table {
extern struct kmem_cache *se_tmr_req_cache;
-extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
+extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
extern void core_tmr_release_req(struct se_tmr_req *);
extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 46aae4f..a037a1a 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -22,10 +22,9 @@
#define PYX_TRANSPORT_LU_COMM_FAILURE -7
#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_TASK_TIMEOUT -10
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -11
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -12
-#define PYX_TRANSPORT_USE_SENSE_REASON -13
+#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
+#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
+#define PYX_TRANSPORT_USE_SENSE_REASON -12
#ifndef SAM_STAT_RESERVATION_CONFLICT
#define SAM_STAT_RESERVATION_CONFLICT 0x18
@@ -38,16 +37,6 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
-/* For SE OBJ Plugins, in seconds */
-#define TRANSPORT_TIMEOUT_TUR 10
-#define TRANSPORT_TIMEOUT_TYPE_DISK 60
-#define TRANSPORT_TIMEOUT_TYPE_ROM 120
-#define TRANSPORT_TIMEOUT_TYPE_TAPE 600
-#define TRANSPORT_TIMEOUT_TYPE_OTHER 300
-
-/* For se_task->task_state_flags */
-#define TSF_EXCEPTION_CLEARED 0x01
-
/*
* struct se_subsystem_dev->su_dev_flags
*/
@@ -64,8 +53,6 @@
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
/* struct se_dev_attrib sanity values */
-/* 10 Minutes */
-#define DA_TASK_TIMEOUT_MAX 600
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
@@ -110,16 +97,13 @@
#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
-struct se_mem;
struct se_subsystem_api;
-extern struct kmem_cache *se_mem_cache;
-
extern int init_se_kmem_caches(void);
extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
-extern int transport_subsystem_check_init(void);
+extern void transport_subsystem_check_init(void);
extern int transport_subsystem_register(struct se_subsystem_api *);
extern void transport_subsystem_release(struct se_subsystem_api *);
extern void transport_load_plugins(void);
@@ -134,7 +118,6 @@ extern void transport_free_session(struct se_session *);
extern void transport_deregister_session_configfs(struct se_session *);
extern void transport_deregister_session(struct se_session *);
extern void transport_cmd_finish_abort(struct se_cmd *, int);
-extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
extern void transport_complete_sync_cache(struct se_cmd *, int);
extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
@@ -142,6 +125,8 @@ extern void transport_add_task_to_execute_queue(struct se_task *,
struct se_device *);
extern void transport_remove_task_from_execute_queue(struct se_task *,
struct se_device *);
+extern void __transport_remove_task_from_execute_queue(struct se_task *,
+ struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -169,29 +154,24 @@ extern void transport_init_se_cmd(struct se_cmd *,
unsigned char *);
void *transport_kmap_first_data_page(struct se_cmd *cmd);
void transport_kunmap_first_data_page(struct se_cmd *cmd);
-extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
-extern int transport_generic_handle_cdb(struct se_cmd *);
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
-extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
+extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
+extern void transport_wait_for_tasks(struct se_cmd *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int, int);
+extern void transport_generic_free_cmd(struct se_cmd *, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
-extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
- struct scatterlist *, struct se_mem *,
- struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_new_cmd(struct se_cmd *);
@@ -200,6 +180,7 @@ extern int transport_generic_do_tmr(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
/* From target_core_cdb.c */
extern int transport_emulate_control_cdb(struct se_task *);
+extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
/*
* Each se_transport_task_t can have N number of possible struct se_task's
@@ -227,6 +208,10 @@ struct se_subsystem_api {
* Transport Type.
*/
u8 transport_type;
+
+ unsigned int fua_write_emulated : 1;
+ unsigned int write_cache_emulated : 1;
+
/*
* struct module for struct se_hba references
*/
@@ -236,18 +221,6 @@ struct se_subsystem_api {
*/
struct list_head sub_api_list;
/*
- * For SCF_SCSI_NON_DATA_CDB
- */
- int (*cdb_none)(struct se_task *);
- /*
- * For SCF_SCSI_DATA_SG_IO_CDB
- */
- int (*map_data_SG)(struct se_task *);
- /*
- * For SCF_SCSI_CONTROL_SG_IO_CDB
- */
- int (*map_control_SG)(struct se_task *);
- /*
* attach_hba():
*/
int (*attach_hba)(struct se_hba *, u32);
@@ -275,22 +248,6 @@ struct se_subsystem_api {
void (*free_device)(void *);
/*
- * dpo_emulated():
- */
- int (*dpo_emulated)(struct se_device *);
- /*
- * fua_write_emulated():
- */
- int (*fua_write_emulated)(struct se_device *);
- /*
- * fua_read_emulated():
- */
- int (*fua_read_emulated)(struct se_device *);
- /*
- * write_cache_emulated():
- */
- int (*write_cache_emulated)(struct se_device *);
- /*
* transport_complete():
*
* Use transport_generic_complete() for majority of DAS transport
@@ -331,10 +288,6 @@ struct se_subsystem_api {
ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
char *);
/*
- * get_cdb():
- */
- unsigned char *(*get_cdb)(struct se_task *);
- /*
* get_device_rev():
*/
u32 (*get_device_rev)(struct se_device *);
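For fabric drivers the most visible prototype change above is transport_generic_free_cmd() dropping its third argument, with waiting now available separately through the newly exported transport_wait_for_tasks(). A hedged sketch of an updated call site (the meaning of the remaining int flag is my reading of the prototypes, not stated in this header):

/* Before: transport_generic_free_cmd(cmd, wait_for_tasks, session_reinstatement); */
transport_wait_for_tasks(cmd);		/* optional explicit quiesce */
transport_generic_free_cmd(cmd, 0);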
diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h
new file mode 100644
index 0000000..beeaed8
--- /dev/null
+++ b/include/trace/events/9p.h
@@ -0,0 +1,176 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM 9p
+
+#if !defined(_TRACE_9P_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_9P_H
+
+#include <linux/tracepoint.h>
+
+#define show_9p_op(type) \
+ __print_symbolic(type, \
+ { P9_TLERROR, "P9_TLERROR" }, \
+ { P9_RLERROR, "P9_RLERROR" }, \
+ { P9_TSTATFS, "P9_TSTATFS" }, \
+ { P9_RSTATFS, "P9_RSTATFS" }, \
+ { P9_TLOPEN, "P9_TLOPEN" }, \
+ { P9_RLOPEN, "P9_RLOPEN" }, \
+ { P9_TLCREATE, "P9_TLCREATE" }, \
+ { P9_RLCREATE, "P9_RLCREATE" }, \
+ { P9_TSYMLINK, "P9_TSYMLINK" }, \
+ { P9_RSYMLINK, "P9_RSYMLINK" }, \
+ { P9_TMKNOD, "P9_TMKNOD" }, \
+ { P9_RMKNOD, "P9_RMKNOD" }, \
+ { P9_TRENAME, "P9_TRENAME" }, \
+ { P9_RRENAME, "P9_RRENAME" }, \
+ { P9_TREADLINK, "P9_TREADLINK" }, \
+ { P9_RREADLINK, "P9_RREADLINK" }, \
+ { P9_TGETATTR, "P9_TGETATTR" }, \
+ { P9_RGETATTR, "P9_RGETATTR" }, \
+ { P9_TSETATTR, "P9_TSETATTR" }, \
+ { P9_RSETATTR, "P9_RSETATTR" }, \
+ { P9_TXATTRWALK, "P9_TXATTRWALK" }, \
+ { P9_RXATTRWALK, "P9_RXATTRWALK" }, \
+ { P9_TXATTRCREATE, "P9_TXATTRCREATE" }, \
+ { P9_RXATTRCREATE, "P9_RXATTRCREATE" }, \
+ { P9_TREADDIR, "P9_TREADDIR" }, \
+ { P9_RREADDIR, "P9_RREADDIR" }, \
+ { P9_TFSYNC, "P9_TFSYNC" }, \
+ { P9_RFSYNC, "P9_RFSYNC" }, \
+ { P9_TLOCK, "P9_TLOCK" }, \
+ { P9_RLOCK, "P9_RLOCK" }, \
+ { P9_TGETLOCK, "P9_TGETLOCK" }, \
+ { P9_RGETLOCK, "P9_RGETLOCK" }, \
+ { P9_TLINK, "P9_TLINK" }, \
+ { P9_RLINK, "P9_RLINK" }, \
+ { P9_TMKDIR, "P9_TMKDIR" }, \
+ { P9_RMKDIR, "P9_RMKDIR" }, \
+ { P9_TRENAMEAT, "P9_TRENAMEAT" }, \
+ { P9_RRENAMEAT, "P9_RRENAMEAT" }, \
+ { P9_TUNLINKAT, "P9_TUNLINKAT" }, \
+ { P9_RUNLINKAT, "P9_RUNLINKAT" }, \
+ { P9_TVERSION, "P9_TVERSION" }, \
+ { P9_RVERSION, "P9_RVERSION" }, \
+ { P9_TAUTH, "P9_TAUTH" }, \
+ { P9_RAUTH, "P9_RAUTH" }, \
+ { P9_TATTACH, "P9_TATTACH" }, \
+ { P9_RATTACH, "P9_RATTACH" }, \
+ { P9_TERROR, "P9_TERROR" }, \
+ { P9_RERROR, "P9_RERROR" }, \
+ { P9_TFLUSH, "P9_TFLUSH" }, \
+ { P9_RFLUSH, "P9_RFLUSH" }, \
+ { P9_TWALK, "P9_TWALK" }, \
+ { P9_RWALK, "P9_RWALK" }, \
+ { P9_TOPEN, "P9_TOPEN" }, \
+ { P9_ROPEN, "P9_ROPEN" }, \
+ { P9_TCREATE, "P9_TCREATE" }, \
+ { P9_RCREATE, "P9_RCREATE" }, \
+ { P9_TREAD, "P9_TREAD" }, \
+ { P9_RREAD, "P9_RREAD" }, \
+ { P9_TWRITE, "P9_TWRITE" }, \
+ { P9_RWRITE, "P9_RWRITE" }, \
+ { P9_TCLUNK, "P9_TCLUNK" }, \
+ { P9_RCLUNK, "P9_RCLUNK" }, \
+ { P9_TREMOVE, "P9_TREMOVE" }, \
+ { P9_RREMOVE, "P9_RREMOVE" }, \
+ { P9_TSTAT, "P9_TSTAT" }, \
+ { P9_RSTAT, "P9_RSTAT" }, \
+ { P9_TWSTAT, "P9_TWSTAT" }, \
+ { P9_RWSTAT, "P9_RWSTAT" })
+
+TRACE_EVENT(9p_client_req,
+ TP_PROTO(struct p9_client *clnt, int8_t type, int tag),
+
+ TP_ARGS(clnt, type, tag),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u32, tag )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = type;
+ __entry->tag = tag;
+ ),
+
+ TP_printk("client %lu request %s tag %d",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag)
+ );
+
+TRACE_EVENT(9p_client_res,
+ TP_PROTO(struct p9_client *clnt, int8_t type, int tag, int err),
+
+ TP_ARGS(clnt, type, tag, err),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u32, tag )
+ __field( __u32, err )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = type;
+ __entry->tag = tag;
+ __entry->err = err;
+ ),
+
+ TP_printk("client %lu response %s tag %d err %d",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag, __entry->err)
+);
+
+/* dump 32 bytes of protocol data */
+#define P9_PROTO_DUMP_SZ 32
+TRACE_EVENT(9p_protocol_dump,
+ TP_PROTO(struct p9_client *clnt, struct p9_fcall *pdu),
+
+ TP_ARGS(clnt, pdu),
+
+ TP_STRUCT__entry(
+ __field( void *, clnt )
+ __field( __u8, type )
+ __field( __u16, tag )
+ __array( unsigned char, line, P9_PROTO_DUMP_SZ )
+ ),
+
+ TP_fast_assign(
+ __entry->clnt = clnt;
+ __entry->type = pdu->id;
+ __entry->tag = pdu->tag;
+ memcpy(__entry->line, pdu->sdata, P9_PROTO_DUMP_SZ);
+ ),
+ TP_printk("clnt %lu %s(tag = %d)\n%.3x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n"
+ "%.3x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+ (long)__entry->clnt, show_9p_op(__entry->type),
+ __entry->tag, 0,
+ __entry->line[0], __entry->line[1],
+ __entry->line[2], __entry->line[3],
+ __entry->line[4], __entry->line[5],
+ __entry->line[6], __entry->line[7],
+ __entry->line[8], __entry->line[9],
+ __entry->line[10], __entry->line[11],
+ __entry->line[12], __entry->line[13],
+ __entry->line[14], __entry->line[15],
+ 16,
+ __entry->line[16], __entry->line[17],
+ __entry->line[18], __entry->line[19],
+ __entry->line[20], __entry->line[21],
+ __entry->line[22], __entry->line[23],
+ __entry->line[24], __entry->line[25],
+ __entry->line[26], __entry->line[27],
+ __entry->line[28], __entry->line[29],
+ __entry->line[30], __entry->line[31])
+ );
+
+#endif /* _TRACE_9P_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
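As with any trace header, the TRACE_EVENT() definitions above only declare the events; exactly one compilation unit in net/9p must define CREATE_TRACE_POINTS before including the header, after which the generated trace_9p_*() helpers are callable from client code. A minimal usage sketch (file placement and variable names are illustrative):

/* In one .c file only, e.g. net/9p/client.c: */
#define CREATE_TRACE_POINTS
#include <trace/events/9p.h>

/* When a request is queued: */
trace_9p_client_req(clnt, type, tag);
/* When the matching reply is processed: */
trace_9p_client_res(clnt, rtype, tag, err);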
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
new file mode 100644
index 0000000..669fbd6
--- /dev/null
+++ b/include/trace/events/rcu.h
@@ -0,0 +1,459 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rcu
+
+#if !defined(_TRACE_RCU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RCU_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Tracepoint for start/end markers used for utilization calculations.
+ * By convention, the string is of the following forms:
+ *
+ * "Start <activity>" -- Mark the start of the specified activity,
+ * such as "context switch". Nesting is permitted.
+ * "End <activity>" -- Mark the end of the specified activity.
+ *
+ * An "@" character within "<activity>" is a comment character: Data
+ * reduction scripts will ignore the "@" and the remainder of the line.
+ */
+TRACE_EVENT(rcu_utilization,
+
+ TP_PROTO(char *s),
+
+ TP_ARGS(s),
+
+ TP_STRUCT__entry(
+ __field(char *, s)
+ ),
+
+ TP_fast_assign(
+ __entry->s = s;
+ ),
+
+ TP_printk("%s", __entry->s)
+);
+
+#ifdef CONFIG_RCU_TRACE
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
+
+/*
+ * Tracepoint for grace-period events: starting and ending a grace
+ * period ("start" and "end", respectively), a CPU noting the start
+ * of a new grace period or the end of an old grace period ("cpustart"
+ * and "cpuend", respectively), a CPU passing through a quiescent
+ * state ("cpuqs"), a CPU coming online or going offline ("cpuonl"
+ * and "cpuofl", respectively), and a CPU being kicked for being too
+ * long in dyntick-idle mode ("kick").
+ */
+TRACE_EVENT(rcu_grace_period,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+
+ TP_ARGS(rcuname, gpnum, gpevent),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(char *, gpevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->gpevent = gpevent;
+ ),
+
+ TP_printk("%s %lu %s",
+ __entry->rcuname, __entry->gpnum, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for grace-period-initialization events. These are
+ * distinguished by the type of RCU, the new grace-period number, the
+ * rcu_node structure level, the starting and ending CPU covered by the
+ * rcu_node structure, and the mask of CPUs that will be waited for.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_grace_period_init,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+ int grplo, int grphi, unsigned long qsmask),
+
+ TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(unsigned long, qsmask)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->qsmask = qsmask;
+ ),
+
+ TP_printk("%s %lu %u %d %d %lx",
+ __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->qsmask)
+);
+
+/*
+ * Tracepoint for tasks blocking within preemptible-RCU read-side
+ * critical sections. Track the type of RCU (which one day might
+ * include SRCU), the grace-period number that the task is blocking
+ * (the current or the next), and the task's PID.
+ */
+TRACE_EVENT(rcu_preempt_task,
+
+ TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+
+ TP_ARGS(rcuname, pid, gpnum),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->pid = pid;
+ ),
+
+ TP_printk("%s %lu %d",
+ __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for tasks that blocked within a given preemptible-RCU
+ * read-side critical section exiting that critical section. Track the
+ * type of RCU (which one day might include SRCU) and the task's PID.
+ */
+TRACE_EVENT(rcu_unlock_preempted_task,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+
+ TP_ARGS(rcuname, gpnum, pid),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, pid)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->pid = pid;
+ ),
+
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+);
+
+/*
+ * Tracepoint for quiescent-state-reporting events. These are
+ * distinguished by the type of RCU, the grace-period number, the
+ * mask of quiescent lower-level entities, the rcu_node structure level,
+ * the starting and ending CPU covered by the rcu_node structure, and
+ * whether there are any blocked tasks blocking the current grace period.
+ * All but the type of RCU are extracted from the rcu_node structure.
+ */
+TRACE_EVENT(rcu_quiescent_state_report,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum,
+ unsigned long mask, unsigned long qsmask,
+ u8 level, int grplo, int grphi, int gp_tasks),
+
+ TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(unsigned long, mask)
+ __field(unsigned long, qsmask)
+ __field(u8, level)
+ __field(int, grplo)
+ __field(int, grphi)
+ __field(u8, gp_tasks)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->mask = mask;
+ __entry->qsmask = qsmask;
+ __entry->level = level;
+ __entry->grplo = grplo;
+ __entry->grphi = grphi;
+ __entry->gp_tasks = gp_tasks;
+ ),
+
+ TP_printk("%s %lu %lx>%lx %u %d %d %u",
+ __entry->rcuname, __entry->gpnum,
+ __entry->mask, __entry->qsmask, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gp_tasks)
+);
+
+/*
+ * Tracepoint for quiescent states detected by force_quiescent_state().
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "ofl" for CPU offline,
+ * or "kick" when kicking a CPU that has been in dyntick-idle mode for
+ * too long.
+ */
+TRACE_EVENT(rcu_fqs,
+
+ TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+
+ TP_ARGS(rcuname, gpnum, cpu, qsevent),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(unsigned long, gpnum)
+ __field(int, cpu)
+ __field(char *, qsevent)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->gpnum = gpnum;
+ __entry->cpu = cpu;
+ __entry->qsevent = qsevent;
+ ),
+
+ TP_printk("%s %lu %d %s",
+ __entry->rcuname, __entry->gpnum,
+ __entry->cpu, __entry->qsevent)
+);
+
+#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) */
+
+/*
+ * Tracepoint for dyntick-idle entry/exit events. These take a string
+ * as argument: "Start" for entering dyntick-idle mode and "End" for
+ * leaving it.
+ */
+TRACE_EVENT(rcu_dyntick,
+
+ TP_PROTO(char *polarity),
+
+ TP_ARGS(polarity),
+
+ TP_STRUCT__entry(
+ __field(char *, polarity)
+ ),
+
+ TP_fast_assign(
+ __entry->polarity = polarity;
+ ),
+
+ TP_printk("%s", __entry->polarity)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback function.
+ * The first argument is the type of RCU, the second argument is
+ * a pointer to the RCU callback itself, and the third element is the
+ * new RCU callback queue length for the current CPU.
+ */
+TRACE_EVENT(rcu_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen),
+
+ TP_ARGS(rcuname, rhp, qlen),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(void *, func)
+ __field(long, qlen)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->func = rhp->func;
+ __entry->qlen = qlen;
+ ),
+
+ TP_printk("%s rhp=%p func=%pf %ld",
+ __entry->rcuname, __entry->rhp, __entry->func, __entry->qlen)
+);
+
+/*
+ * Tracepoint for the registration of a single RCU callback of the special
+ * kfree() form. The first argument is the RCU type, the second argument
+ * is a pointer to the RCU callback, the third argument is the offset
+ * of the callback within the enclosing RCU-protected data structure,
+ * and the fourth argument is the new RCU callback queue length for the
+ * current CPU.
+ */
+TRACE_EVENT(rcu_kfree_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+ long qlen),
+
+ TP_ARGS(rcuname, rhp, offset, qlen),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(unsigned long, offset)
+ __field(long, qlen)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->offset = offset;
+ __entry->qlen = qlen;
+ ),
+
+ TP_printk("%s rhp=%p func=%ld %ld",
+ __entry->rcuname, __entry->rhp, __entry->offset,
+ __entry->qlen)
+);
+
+/*
+ * Tracepoint for marking the beginning of rcu_do_batch, performed to start
+ * RCU callback invocation. The first argument is the RCU flavor,
+ * the second is the total number of callbacks (including those that
+ * are not yet ready to be invoked), and the third argument is the
+ * current RCU-callback batch limit.
+ */
+TRACE_EVENT(rcu_batch_start,
+
+ TP_PROTO(char *rcuname, long qlen, int blimit),
+
+ TP_ARGS(rcuname, qlen, blimit),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(long, qlen)
+ __field(int, blimit)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->qlen = qlen;
+ __entry->blimit = blimit;
+ ),
+
+ TP_printk("%s CBs=%ld bl=%d",
+ __entry->rcuname, __entry->qlen, __entry->blimit)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback function.
+ * The first argument is the type of RCU, and the second argument is
+ * a pointer to the RCU callback itself.
+ */
+TRACE_EVENT(rcu_invoke_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp),
+
+ TP_ARGS(rcuname, rhp),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(void *, func)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->func = rhp->func;
+ ),
+
+ TP_printk("%s rhp=%p func=%pf",
+ __entry->rcuname, __entry->rhp, __entry->func)
+);
+
+/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree() form. The first argument is the RCU flavor, the second
+ * argument is a pointer to the RCU callback, and the third argument
+ * is the offset of the callback within the enclosing RCU-protected
+ * data structure.
+ */
+TRACE_EVENT(rcu_invoke_kfree_callback,
+
+ TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+
+ TP_ARGS(rcuname, rhp, offset),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(void *, rhp)
+ __field(unsigned long, offset)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->rhp = rhp;
+ __entry->offset = offset;
+ ),
+
+ TP_printk("%s rhp=%p func=%ld",
+ __entry->rcuname, __entry->rhp, __entry->offset)
+);
+
+/*
+ * Tracepoint for exiting rcu_do_batch after RCU callbacks have been
+ * invoked. The first argument is the name of the RCU flavor and
+ * the second argument is the number of callbacks actually invoked.
+ */
+TRACE_EVENT(rcu_batch_end,
+
+ TP_PROTO(char *rcuname, int callbacks_invoked),
+
+ TP_ARGS(rcuname, callbacks_invoked),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(int, callbacks_invoked)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->callbacks_invoked = callbacks_invoked;
+ ),
+
+ TP_printk("%s CBs-invoked=%d",
+ __entry->rcuname, __entry->callbacks_invoked)
+);
+
+#else /* #ifdef CONFIG_RCU_TRACE */
+
+#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, qsmask) do { } while (0)
+#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks) do { } while (0)
+#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_dyntick(polarity) do { } while (0)
+#define trace_rcu_callback(rcuname, rhp, qlen) do { } while (0)
+#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen) do { } while (0)
+#define trace_rcu_batch_start(rcuname, qlen, blimit) do { } while (0)
+#define trace_rcu_invoke_callback(rcuname, rhp) do { } while (0)
+#define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
+#define trace_rcu_batch_end(rcuname, callbacks_invoked) do { } while (0)
+
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#endif /* _TRACE_RCU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
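The #else stubs at the end mean RCU code can call these tracepoints unconditionally; with CONFIG_RCU_TRACE=n they compile to empty statements. The rcu_utilization event additionally relies on the Start/End string convention described at the top of the header, e.g. (call site illustrative):

trace_rcu_utilization("Start context switch");
/* ... scheduler-visible RCU work ... */
trace_rcu_utilization("End context switch");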
diff --git a/include/trace/events/regmap.h b/include/trace/events/regmap.h
new file mode 100644
index 0000000..1e3193b
--- /dev/null
+++ b/include/trace/events/regmap.h
@@ -0,0 +1,136 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regmap
+
+#if !defined(_TRACE_REGMAP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGMAP_H
+
+#include <linux/device.h>
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct regmap;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(regmap_reg,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( unsigned int, reg )
+ __field( unsigned int, val )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->reg = reg;
+ __entry->val = val;
+ ),
+
+ TP_printk("%s reg=%x val=%x", __get_str(name),
+ (unsigned int)__entry->reg,
+ (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_write,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
+DEFINE_EVENT(regmap_reg, regmap_reg_read,
+
+ TP_PROTO(struct device *dev, unsigned int reg,
+ unsigned int val),
+
+ TP_ARGS(dev, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(regmap_block,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( unsigned int, reg )
+ __field( int, count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->reg = reg;
+ __entry->count = count;
+ ),
+
+ TP_printk("%s reg=%x count=%d", __get_str(name),
+ (unsigned int)__entry->reg,
+ (int)__entry->count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_read_done,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_start,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+DEFINE_EVENT(regmap_block, regmap_hw_write_done,
+
+ TP_PROTO(struct device *dev, unsigned int reg, int count),
+
+ TP_ARGS(dev, reg, count)
+);
+
+TRACE_EVENT(regcache_sync,
+
+ TP_PROTO(struct device *dev, const char *type,
+ const char *status),
+
+ TP_ARGS(dev, type, status),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __string( status, status )
+ __string( type, type )
+ __field( int, type )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __assign_str(status, status);
+ __assign_str(type, type);
+ ),
+
+ TP_printk("%s type=%s status=%s", __get_str(name),
+ __get_str(type), __get_str(status))
+);
+
+#endif /* _TRACE_REGMAP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
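DECLARE_EVENT_CLASS()/DEFINE_EVENT() above lets the two register events and the four block events share one entry layout and print format; each DEFINE_EVENT() only contributes a name. Call sites in the regmap core would then look roughly like this (the map->dev dereference and val_count are illustrative):

trace_regmap_reg_write(map->dev, reg, val);

trace_regmap_hw_write_start(map->dev, reg, val_count);
/* ... bus transfer ... */
trace_regmap_hw_write_done(map->dev, reg, val_count);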
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
new file mode 100644
index 0000000..d62c558
--- /dev/null
+++ b/include/trace/events/rpm.h
@@ -0,0 +1,99 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/device.h>
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev) )
+ __field( int, flags )
+ __field( int , usage_count )
+ __field( int , disable_depth )
+ __field( int , runtime_auto )
+ __field( int , request_pending )
+ __field( int , irq_safe )
+ __field( int , child_count )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->flags = flags;
+ __entry->usage_count = atomic_read(
+ &dev->power.usage_count);
+ __entry->disable_depth = dev->power.disable_depth;
+ __entry->runtime_auto = dev->power.runtime_auto;
+ __entry->request_pending = dev->power.request_pending;
+ __entry->irq_safe = dev->power.irq_safe;
+ __entry->child_count = atomic_read(
+ &dev->power.child_count);
+ ),
+
+ TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+ " irq-%-1d child-%d",
+ __get_str(name), __entry->flags,
+ __entry->usage_count,
+ __entry->disable_depth,
+ __entry->runtime_auto,
+ __entry->request_pending,
+ __entry->irq_safe,
+ __entry->child_count
+ )
+);
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
+
+TRACE_EVENT(rpm_return_int,
+ TP_PROTO(struct device *dev, unsigned long ip, int ret),
+ TP_ARGS(dev, ip, ret),
+
+ TP_STRUCT__entry(
+ __string( name, dev_name(dev))
+ __field( unsigned long, ip )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->ip = ip;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+ __entry->ret)
+);
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
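rpm_return_int pairs a return code with its call site, so the usual idiom is to pass the current instruction pointer for the ip argument, while the rpm_internal events fire on entry to the corresponding runtime-PM helpers. A small sketch (flag value illustrative):

trace_rpm_suspend(dev, RPM_ASYNC);
/* ... runtime-PM callback runs ... */
trace_rpm_return_int(dev, _THIS_IP_, retval);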
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f633478..959ff18 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
* For all intents and purposes a preempted task is a running task.
*/
if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
- state = TASK_RUNNING;
+ state = TASK_RUNNING | TASK_STATE_MAX;
#endif
return state;
@@ -137,13 +137,14 @@ TRACE_EVENT(sched_switch,
__entry->next_prio = next->prio;
),
- TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
+ TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
- __entry->prev_state ?
- __print_flags(__entry->prev_state, "|",
+ __entry->prev_state & (TASK_STATE_MAX-1) ?
+ __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
{ 16, "Z" }, { 32, "X" }, { 64, "x" },
{ 128, "W" }) : "R",
+ __entry->prev_state & TASK_STATE_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
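The net effect of the sched_switch change: a task that was merely preempted keeps its real state bits and additionally carries the TASK_STATE_MAX marker, which the updated TP_printk renders as a trailing '+'. A preempted runnable task therefore prints as prev_state=R+ rather than prev_state=R, letting trace post-processing distinguish involuntary context switches from voluntary ones without any extra event.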
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 533c49f..7697249 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -711,6 +711,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef __perf_count
#define __perf_count(c) __count = (c)
+#undef TP_perf_assign
+#define TP_perf_assign(args...) args
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 76f7538..d29c153 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -25,8 +25,9 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
-int alloc_xenballooned_pages(int nr_pages, struct page** pages);
-void free_xenballooned_pages(int nr_pages, struct page** pages);
+int alloc_xenballooned_pages(int nr_pages, struct page **pages,
+ bool highmem);
+void free_xenballooned_pages(int nr_pages, struct page **pages);
struct sys_device;
#ifdef CONFIG_XEN_SELFBALLOONING
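alloc_xenballooned_pages() now takes a highmem flag, letting callers that only need frame numbers (rather than lowmem kernel mappings) accept highmem pages. A sketch of a caller under that reading (error handling elided, variable names illustrative):

rc = alloc_xenballooned_pages(nr_pages, pages, true /* highmem ok */);
if (rc == 0) {
	/* ... use the pages, e.g. as grant-mapping targets ... */
	free_xenballooned_pages(nr_pages, pages);
}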
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index b1fab6b..6b99bfb 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -156,6 +156,7 @@ unsigned int gnttab_max_grant_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct page **pages, unsigned int count);
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 99fcffb..f0b6890 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -26,7 +26,11 @@ enum xsd_sockmsg_type
XS_SET_PERMS,
XS_WATCH_EVENT,
XS_ERROR,
- XS_IS_DOMAIN_INTRODUCED
+ XS_IS_DOMAIN_INTRODUCED,
+ XS_RESUME,
+ XS_SET_TARGET,
+ XS_RESTRICT,
+ XS_RESET_WATCHES
};
#define XS_WRITE_NONE "NONE"
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 534cac8..c1080d9 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -109,6 +109,7 @@ struct physdev_irq {
#define MAP_PIRQ_TYPE_MSI 0x0
#define MAP_PIRQ_TYPE_GSI 0x1
#define MAP_PIRQ_TYPE_UNKNOWN 0x2
+#define MAP_PIRQ_TYPE_MSI_SEG 0x3
#define PHYSDEVOP_map_pirq 13
struct physdev_map_pirq {
@@ -119,7 +120,7 @@ struct physdev_map_pirq {
int index;
/* IN or OUT */
int pirq;
- /* IN */
+ /* IN - high 16 bits hold segment for MAP_PIRQ_TYPE_MSI_SEG */
int bus;
/* IN */
int devfn;
@@ -198,6 +199,37 @@ struct physdev_get_free_pirq {
uint32_t pirq;
};
+#define XEN_PCI_DEV_EXTFN 0x1
+#define XEN_PCI_DEV_VIRTFN 0x2
+#define XEN_PCI_DEV_PXM 0x4
+
+#define PHYSDEVOP_pci_device_add 25
+struct physdev_pci_device_add {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+ uint32_t flags;
+ struct {
+ uint8_t bus;
+ uint8_t devfn;
+ } physfn;
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ uint32_t optarr[];
+#elif defined(__GNUC__)
+ uint32_t optarr[0];
+#endif
+};
+
+#define PHYSDEVOP_pci_device_remove 26
+#define PHYSDEVOP_restore_msi_ext 27
+struct physdev_pci_device {
+ /* IN */
+ uint16_t seg;
+ uint8_t bus;
+ uint8_t devfn;
+};
+
/*
* Notify that some PIRQ-bound event channels have been unmasked.
* ** This command is obsolete since interface version 0x00030202 and is **
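The new PHYSDEVOP_pci_device_add/_remove operations above are issued through the generic physdev hypercall. A hedged sketch of how dom0 code might announce a PCI device to the hypervisor (struct pci_dev *pdev and the zero flags are illustrative; error handling elided):

struct physdev_pci_device_add add = {
	.seg   = pci_domain_nr(pdev->bus),
	.bus   = pdev->bus->number,
	.devfn = pdev->devfn,
	.flags = 0,
};
int rc = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add);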
diff --git a/include/xen/page.h b/include/xen/page.h
index 0be36b9..12765b6 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,6 +3,16 @@
#include <asm/xen/page.h>
-extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
+struct xen_memory_region {
+ phys_addr_t start;
+ phys_addr_t size;
+};
+
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+
+extern __initdata
+struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];
+
+extern unsigned long xen_released_pages;
#endif /* _XEN_PAGE_H */