From a2daff6803a384ce065e3681a2affea1da59c5f5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 31 May 2011 14:09:00 -0700 Subject: fuse: fix non-ANSI void function notation Fix a sparse warning about a non-ANSI void function parameter list: fs/fuse/inode.c:74:44: warning: non-ANSI function declaration of function 'fuse_alloc_forget' Signed-off-by: Randy Dunlap Signed-off-by: Miklos Szeredi diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index cc6ec4b..5354906e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -71,7 +71,7 @@ struct fuse_mount_data { unsigned blksize; }; -struct fuse_forget_link *fuse_alloc_forget() +struct fuse_forget_link *fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); } -- cgit v0.10.2 From 196cfe2ae8fcdc03b3c7d627e7dfe8c0ce7229f9 Mon Sep 17 00:00:00 2001 From: Stefan Bader Date: Thu, 14 Jul 2011 15:30:22 +0200 Subject: xen-blkfront: Drop name and minor adjustments for emulated scsi devices These were intended to avoid the namespace clash when representing emulated IDE and SCSI devices. However, that seems to confuse users more than expected (a disk defined as sda becomes xvde). So for now go back to the scheme that does no adjustments. This will break guest configurations that mix IDE and SCSI names, but that should by now be expected. Acked-by: Stefano Stabellini Signed-off-by: Stefan Bader Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index b536a9c..238b941 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -123,8 +123,8 @@ static DEFINE_SPINLOCK(minor_lock); #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED)) #define EMULATED_HD_DISK_MINOR_OFFSET (0) #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256) -#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16)) -#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4) +#define EMULATED_SD_DISK_MINOR_OFFSET (0) +#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256) #define DEV_NAME "xvd" /* name in /dev */ -- cgit v0.10.2 From 89153b5cae9f40c224a5d321665a97bf14220c2c Mon Sep 17 00:00:00 2001 From: Stefan Bader Date: Thu, 14 Jul 2011 15:30:37 +0200 Subject: xen-blkfront: Fix off-by-one warning about name clash Avoid telling users to choose an xvd device name from xvde onwards when they are already using xvde. Acked-by: Stefano Stabellini Signed-off-by: Stefan Bader Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 238b941..9ea8c25 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -529,7 +529,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, minor = BLKIF_MINOR_EXT(info->vdevice); nr_parts = PARTS_PER_EXT_DISK; offset = minor / nr_parts; - if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4) + if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4) printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with " "emulated IDE disks,\n\t choose an xvd device name" "from xvde on\n", info->vdevice); -- cgit v0.10.2 From 4f9bae351d299149a84f76cd34bf0150614e8c8e Mon Sep 17 00:00:00 2001 From: Han Pingtian Date: Mon, 18 Jul 2011 11:13:14 +0800 Subject: perf buildid-cache: Zero out buffer of filenames when adding/removing buildid The readlink() function doesn't append a null byte to buf, so we should zero out buf with zalloc().
Otherwise we will sometimes see errors like this: [root@intel-s3e36-01]~# /usr/bin/perf buildid-cache -a /lib/modules/2.6.32-130.el6.x86_64/kernel/crypto/twofish_common.ko -v Adding f64ba8efd5f53c7ad332fc17db1d21de309038e1 /lib/modules/2.6.32-130.el6.x86_64/kernel/crypto/twofish_common.ko: Ok [root@intel-s3e36-01]~# /usr/bin/perf buildid-cache -r /lib/modules/2.6.32-130.el6.x86_64/kernel/crypto/twofish_common.ko -v Removing f64ba8efd5f53c7ad332fc17db1d21de309038e1 /lib/modules/2.6.32-130.el6.x86_64/kernel/crypto/twofish_common.ko: FAIL /lib/modules/2.6.32-130.el6.x86_64/kernel/crypto/twofish_common.ko wasn't in the cache The change in build_id_cache__add_s() is a defensive measure. Tested-by: Jiri Olsa Cc: Jiri Olsa Link: http://lkml.kernel.org/r/20110718031314.GA5802@hpt.nay.redhat.com Signed-off-by: Han Pingtian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index cb2959a..d4f3101 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -189,8 +189,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, const char *name, bool is_kallsyms) { const size_t size = PATH_MAX; - char *realname, *filename = malloc(size), - *linkname = malloc(size), *targetname; + char *realname, *filename = zalloc(size), + *linkname = zalloc(size), *targetname; int len, err = -1; if (is_kallsyms) { @@ -254,8 +254,8 @@ static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) { const size_t size = PATH_MAX; - char *filename = malloc(size), - *linkname = malloc(size); + char *filename = zalloc(size), + *linkname = zalloc(size); int err = -1; if (filename == NULL || linkname == NULL) -- cgit v0.10.2 From 40c5cc263954444f5a76cbf25d408c42da480122 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sun, 24 Jul 2011 22:39:12 +0100 Subject: regmap: Fix bulk reads We should be reading the number of bytes we were asked for, not the size of a single register. Signed-off-by: Mark Brown diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index cf3565c..0eef4da 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -317,7 +317,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, u8[0] |= map->bus->read_flag_mask; ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, - val, map->format.val_bytes); + val, val_len); if (ret != 0) return ret; -- cgit v0.10.2 From f629299b544b6cc12b4e3e85fec96f4ce5809482 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 24 Jul 2011 23:15:42 +0200 Subject: trace events: Update version number reference to new 3.x scheme for EVENT_POWER_TRACING_DEPRECATED What was scheduled to be 2.6.41 is now going to be 3.1. Signed-off-by: Jesper Juhl Cc: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker Cc: Steven Rostedt Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1107250929370.8080@swampdragon.chaosbits.net Signed-off-by: Ingo Molnar diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 2ad39e5..cd31345 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -82,7 +82,7 @@ config EVENT_POWER_TRACING_DEPRECATED power:power_frequency This is for userspace compatibility and will vanish after 5 kernel iterations, - namely 2.6.41. + namely 3.1.
config CONTEXT_SWITCH_TRACER bool -- cgit v0.10.2 From 4152ab377b55e9d3e5700de00ef799519ead698d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 25 Jul 2011 11:06:19 -0300 Subject: perf evlist: Introduce 'disable' method To remove the last case of access to the FD() macro outside the library. Inspired by a patch by Borislav that moved the FD() macro to util.h, for namespace concerns I rather preferred to constrain it to ev{sel,list}.c. Cc: Borislav Petkov Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-qn893qsstcg366tkucu649qj@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 80dc5b7..f6426b4 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -30,8 +30,6 @@ #include #include -#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) - enum write_mode_t { WRITE_FORCE, WRITE_APPEND @@ -438,7 +436,6 @@ static void mmap_read_all(void) static int __cmd_record(int argc, const char **argv) { - int i; struct stat st; int flags; int err; @@ -682,7 +679,6 @@ static int __cmd_record(int argc, const char **argv) for (;;) { int hits = samples; - int thread; mmap_read_all(); @@ -693,19 +689,8 @@ static int __cmd_record(int argc, const char **argv) waking++; } - if (done) { - for (i = 0; i < evsel_list->cpus->nr; i++) { - struct perf_evsel *pos; - - list_for_each_entry(pos, &evsel_list->entries, node) { - for (thread = 0; - thread < evsel_list->threads->nr; - thread++) - ioctl(FD(pos, i, thread), - PERF_EVENT_IOC_DISABLE); - } - } - } + if (done) + perf_evlist__disable(evsel_list); } if (quiet || signr == SIGUSR1) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index b021ea9..e03e7bc 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -91,6 +91,19 @@ int perf_evlist__add_default(struct perf_evlist *evlist) return 0; } +void perf_evlist__disable(struct perf_evlist *evlist) +{ + int cpu, thread; + struct perf_evsel *pos; + + for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + list_for_each_entry(pos, &evlist->entries, node) { + for (thread = 0; thread < evlist->threads->nr; thread++) + ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); + } + } +} + int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) { int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index b2b8623..ce85ae9 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -53,6 +53,8 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist); int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); void perf_evlist__munmap(struct perf_evlist *evlist); +void perf_evlist__disable(struct perf_evlist *evlist); + static inline void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, struct thread_map *threads) -- cgit v0.10.2 From ed43233be910bbc8b9da3d61aa1b931843d1b44e Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:39:39 +0000 Subject: xfs: Remove the macro XFS_BUF_BFLAGS Remove the definition of the macro XFS_BUF_BFLAGS and its usage. 
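This commit opens a long run of cleanups that retire trivial xfs_buf accessor macros, and the same two-case pattern repeats through the rest of the series. A minimal sketch of both cases, using definitions that appear verbatim in the hunks below:

	/* Case 1: the macro is a pure alias, so call sites are open-coded. */
	#define XFS_BUF_BFLAGS(bp)	((bp)->b_flags)	/* old definition */
	int64_t fl = bp->b_flags;			/* new call site  */

	/* Case 2: the macro carries real logic, so it becomes a type-safe
	 * inline helper instead (done for XFS_BUF_ISPINNED later in the
	 * series). */
	static inline int xfs_buf_ispinned(struct xfs_buf *bp)
	{
		return atomic_read(&bp->b_pin_count);
	}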
Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index b2b4119..969fd15 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1094,7 +1094,7 @@ STATIC int xfs_bioerror_relse( struct xfs_buf *bp) { - int64_t fl = XFS_BUF_BFLAGS(bp); + int64_t fl = bp->b_flags; /* * No need to wait until the buffer is unpinned. * We aren't flushing it. diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 6a83b46..6b6c25f 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -232,7 +232,6 @@ extern void xfs_buf_terminate(void); ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) -#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) #define XFS_BUF_ZEROFLAGS(bp) \ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 15584fc..1bc04d4 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -430,7 +430,7 @@ shutdown_abort: if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) xfs_notice(mp, "about to pop assert, bp == 0x%p", bp); #endif - ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) != + ASSERT((bp->b_flags & (XBF_STALE|XBF_DELWRI)) != (XBF_STALE|XBF_DELWRI)); trace_xfs_trans_read_buf_shut(bp, _RET_IP_); -- cgit v0.10.2 From 5a52c2a581cddcb676a54a95d99cd39f5577c33b Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:39:51 +0000 Subject: xfs: Remove the macro XFS_BUF_ERROR and family Remove the definitions and usage of the macros XFS_BUF_ERROR, XFS_BUF_GETERROR and XFS_BUF_ISERROR. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 969fd15..704418a 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -596,7 +596,7 @@ _xfs_buf_read( bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); status = xfs_buf_iorequest(bp); - if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC)) + if (status || bp->b_error || (flags & XBF_ASYNC)) return status; return xfs_buf_iowait(bp); } @@ -1069,7 +1069,7 @@ xfs_bioerror( /* * No need to wait until the buffer is unpinned, we aren't flushing it. */ - XFS_BUF_ERROR(bp, EIO); + xfs_buf_ioerror(bp, EIO); /* * We're calling xfs_buf_ioend, so delete XBF_DONE flag. @@ -1115,7 +1115,7 @@ xfs_bioerror_relse( * There's no reason to mark error for * ASYNC buffers. */ - XFS_BUF_ERROR(bp, EIO); + xfs_buf_ioerror(bp, EIO); XFS_BUF_FINISH_IOWAIT(bp); } else { xfs_buf_relse(bp); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 6b6c25f..08a15c2 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -250,10 +250,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) -#define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no) -#define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp) -#define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 
1 : 0) - #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 837f311..784019d 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -403,7 +403,7 @@ xfs_qm_dqalloc( dqp->q_blkno, mp->m_quotainfo->qi_dqchunklen, 0); - if (!bp || (error = XFS_BUF_GETERROR(bp))) + if (!bp || (error = xfs_buf_geterror(bp))) goto error1; /* * Make a chunk of dquots out of this buffer and log diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 1e00b3e..bdd9cb5 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -451,8 +451,7 @@ xfs_alloc_read_agfl( XFS_FSS_TO_BB(mp, 1), 0, &bp); if (error) return error; - ASSERT(bp); - ASSERT(!XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF); *bpp = bp; return 0; @@ -2116,7 +2115,7 @@ xfs_read_agf( if (!*bpp) return 0; - ASSERT(!XFS_BUF_GETERROR(*bpp)); + ASSERT(!(*bpp)->b_error); agf = XFS_BUF_TO_AGF(*bpp); /* @@ -2168,7 +2167,7 @@ xfs_alloc_read_agf( return error; if (!*bpp) return 0; - ASSERT(!XFS_BUF_GETERROR(*bpp)); + ASSERT(!(*bpp)->b_error); agf = XFS_BUF_TO_AGF(*bpp); pag = xfs_perag_get(mp, agno); diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index cbae424..160bcdc 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -2121,8 +2121,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args) bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt, XBF_LOCK | XBF_DONT_BLOCK); - ASSERT(bp); - ASSERT(!XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen : XFS_BUF_SIZE(bp); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index cabf4b5..2b9fd38 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -275,8 +275,7 @@ xfs_btree_dup_cursor( return error; } new->bc_bufs[i] = bp; - ASSERT(bp); - ASSERT(!XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); } else new->bc_bufs[i] = NULL; } @@ -467,8 +466,7 @@ xfs_btree_get_bufl( ASSERT(fsbno != NULLFSBLOCK); d = XFS_FSB_TO_DADDR(mp, fsbno); bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); - ASSERT(bp); - ASSERT(!XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); return bp; } @@ -491,8 +489,7 @@ xfs_btree_get_bufs( ASSERT(agbno != NULLAGBLOCK); d = XFS_AGB_TO_DADDR(mp, agno, agbno); bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); - ASSERT(bp); - ASSERT(!XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); return bp; } @@ -632,7 +629,7 @@ xfs_btree_read_bufl( mp->m_bsize, lock, &bp))) { return error; } - ASSERT(!bp || !XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); if (bp) XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); *bpp = bp; @@ -973,8 +970,7 @@ xfs_btree_get_buf_block( *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, mp->m_bsize, flags); - ASSERT(*bpp); - ASSERT(!XFS_BUF_GETERROR(*bpp)); + ASSERT(!xfs_buf_geterror(*bpp)); *block = XFS_BUF_TO_BLOCK(*bpp); return 0; @@ -1006,8 +1002,7 @@ xfs_btree_read_buf_block( if (error) return error; - ASSERT(*bpp != NULL); - ASSERT(!XFS_BUF_GETERROR(*bpp)); + ASSERT(!xfs_buf_geterror(*bpp)); xfs_btree_set_refs(cur, *bpp); *block = XFS_BUF_TO_BLOCK(*bpp); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 8849291..38417ab 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -960,7 +960,7 @@ xfs_buf_iodone_callbacks( static ulong lasttime; static xfs_buftarg_t *lasttarg; - if 
(likely(!XFS_BUF_GETERROR(bp))) + if (likely(!xfs_buf_geterror(bp))) goto do_callbacks; /* @@ -991,7 +991,7 @@ xfs_buf_iodone_callbacks( * around. */ if (XFS_BUF_ISASYNC(bp)) { - XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */ + xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */ if (!XFS_BUF_ISSTALE(bp)) { XFS_BUF_DELAYWRITE(bp); diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 2925726..5d9290d 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -2040,7 +2040,7 @@ xfs_da_do_buf( case 0: bp = xfs_trans_get_buf(trans, mp->m_ddev_targp, mappedbno, nmapped, 0); - error = bp ? XFS_BUF_GETERROR(bp) : XFS_ERROR(EIO); + error = bp ? bp->b_error : XFS_ERROR(EIO); break; case 1: case 2: diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index dd5628b..9f24ec2 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -202,8 +202,7 @@ xfs_ialloc_inode_init( fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize * blks_per_cluster, XBF_LOCK); - ASSERT(fbuf); - ASSERT(!XFS_BUF_GETERROR(fbuf)); + ASSERT(!xfs_buf_geterror(fbuf)); /* * Initialize all inodes in this buffer and then log them. @@ -1486,7 +1485,7 @@ xfs_read_agi( if (error) return error; - ASSERT(*bpp && !XFS_BUF_GETERROR(*bpp)); + ASSERT(!xfs_buf_geterror(*bpp)); agi = XFS_BUF_TO_AGI(*bpp); /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 3cc21dd..bdb47b2 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2473,7 +2473,7 @@ cluster_corrupt_out: if (bp->b_iodone) { XFS_BUF_UNDONE(bp); XFS_BUF_STALE(bp); - XFS_BUF_ERROR(bp,EIO); + xfs_buf_ioerror(bp, EIO); xfs_buf_ioend(bp, 0); } else { XFS_BUF_STALE(bp); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 06ff843..d1595e7 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -878,7 +878,7 @@ xlog_iodone(xfs_buf_t *bp) /* * Race to shutdown the filesystem if we see an error. */ - if (XFS_TEST_ERROR((XFS_BUF_GETERROR(bp)), l->l_mp, + if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp, XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) { xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp)); XFS_BUF_STALE(bp); @@ -1248,7 +1248,7 @@ xlog_bdstrat( struct xlog_in_core *iclog = bp->b_fspriv; if (iclog->ic_state & XLOG_STATE_IOERROR) { - XFS_BUF_ERROR(bp, EIO); + xfs_buf_ioerror(bp, EIO); XFS_BUF_STALE(bp); xfs_buf_ioend(bp, 0); /* diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 8fe4206..a8e0827 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -360,7 +360,7 @@ STATIC void xlog_recover_iodone( struct xfs_buf *bp) { - if (XFS_BUF_GETERROR(bp)) { + if (bp->b_error) { /* * We're not going to bother about retrying * this during recovery. One strike! 
@@ -2135,15 +2135,14 @@ xlog_recover_buffer_pass2( bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, buf_flags); - if (XFS_BUF_ISERROR(bp)) { + error = xfs_buf_geterror(bp); + if (error) { xfs_ioerror_alert("xlog_recover_do..(read#1)", mp, bp, buf_f->blf_blkno); - error = XFS_BUF_GETERROR(bp); xfs_buf_relse(bp); return error; } - error = 0; if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); } else if (buf_f->blf_flags & @@ -2227,14 +2226,13 @@ xlog_recover_inode_pass2( bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, XBF_LOCK); - if (XFS_BUF_ISERROR(bp)) { + error = xfs_buf_geterror(bp); + if (error) { xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, bp, in_f->ilf_blkno); - error = XFS_BUF_GETERROR(bp); xfs_buf_relse(bp); goto error; } - error = 0; ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 8f76fdf..cb8132c 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -883,7 +883,7 @@ xfs_rtbuf_get( if (error) { return error; } - ASSERT(bp && !XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); *bpp = bp; return 0; } diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index d6d6fdf..d1f76f8 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -106,7 +106,7 @@ xfs_ioerror_alert( " (\"%s\") error %d buf count %zd", XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), (__uint64_t)blkno, func, - XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp)); + bp->b_error, XFS_BUF_COUNT(bp)); } /* @@ -137,8 +137,8 @@ xfs_read_buf( bp = xfs_buf_read(target, blkno, len, flags); if (!bp) return XFS_ERROR(EIO); - error = XFS_BUF_GETERROR(bp); - if (bp && !error && !XFS_FORCED_SHUTDOWN(mp)) { + error = bp->b_error; + if (!error && !XFS_FORCED_SHUTDOWN(mp)) { *bpp = bp; } else { *bpp = NULL; diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 1bc04d4..f9f1bf6 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -194,7 +194,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, return NULL; } - ASSERT(!XFS_BUF_GETERROR(bp)); + ASSERT(!bp->b_error); _xfs_trans_bjoin(tp, bp, 1); trace_xfs_trans_get_buf(bp->b_fspriv); @@ -293,10 +293,10 @@ xfs_trans_read_buf( return (flags & XBF_TRYLOCK) ? EAGAIN : XFS_ERROR(ENOMEM); - if (XFS_BUF_GETERROR(bp) != 0) { + if (bp->b_error) { + error = bp->b_error; xfs_ioerror_alert("xfs_trans_read_buf", mp, bp, blkno); - error = XFS_BUF_GETERROR(bp); xfs_buf_relse(bp); return error; } @@ -330,7 +330,7 @@ xfs_trans_read_buf( ASSERT(xfs_buf_islocked(bp)); ASSERT(bp->b_transp == tp); ASSERT(bp->b_fspriv != NULL); - ASSERT((XFS_BUF_ISERROR(bp)) == 0); + ASSERT(!bp->b_error); if (!(XFS_BUF_ISDONE(bp))) { trace_xfs_trans_read_buf_io(bp, _RET_IP_); ASSERT(!XFS_BUF_ISASYNC(bp)); @@ -386,10 +386,9 @@ xfs_trans_read_buf( return (flags & XBF_TRYLOCK) ? 
0 : XFS_ERROR(ENOMEM); } - if (XFS_BUF_GETERROR(bp) != 0) { - XFS_BUF_SUPER_STALE(bp); - error = XFS_BUF_GETERROR(bp); - + if (bp->b_error) { + error = bp->b_error; + XFS_BUF_SUPER_STALE(bp); xfs_ioerror_alert("xfs_trans_read_buf", mp, bp, blkno); if (tp->t_flags & XFS_TRANS_DIRTY) diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 88d1214..3baebe2 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -83,7 +83,7 @@ xfs_readlink_bmap( bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); - error = XFS_BUF_GETERROR(bp); + error = xfs_buf_geterror(bp); if (error) { xfs_ioerror_alert("xfs_readlink", ip->i_mount, bp, XFS_BUF_ADDR(bp)); @@ -1648,7 +1648,7 @@ xfs_symlink( byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount); bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, BTOBB(byte_cnt), 0); - ASSERT(bp && !XFS_BUF_GETERROR(bp)); + ASSERT(!xfs_buf_geterror(bp)); if (pathlen < byte_cnt) { byte_cnt = pathlen; } -- cgit v0.10.2 From b75e40a4193ca027af7327ef30c31d45aa0a0e40 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:39:57 +0000 Subject: xfs: Remove macro XFS_BUF_BUSY and family Remove the definitions and uses of the macros XFS_BUF_BUSY, XFS_BUF_UNBUSY, and XFS_BUF_ISBUSY. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 704418a..ae2c2e7 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -679,7 +679,6 @@ xfs_buf_read_uncached( /* set up the buffer for a read IO */ XFS_BUF_SET_ADDR(bp, daddr); XFS_BUF_READ(bp); - XFS_BUF_BUSY(bp); xfsbdstrat(mp, bp); error = xfs_buf_iowait(bp); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 08a15c2..05e744f 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -254,10 +254,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) -#define XFS_BUF_BUSY(bp) do { } while (0) -#define XFS_BUF_UNBUSY(bp) do { } while (0) -#define XFS_BUF_ISBUSY(bp) (1) - #define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 784019d..0e12861 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -318,7 +318,6 @@ xfs_qm_init_dquot_blk( int curid, i; ASSERT(tp); - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); d = (xfs_dqblk_t *)XFS_BUF_PTR(bp); @@ -534,7 +533,6 @@ xfs_qm_dqtobp( return XFS_ERROR(error); } - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); /* @@ -553,7 +551,6 @@ xfs_qm_dqtobp( xfs_trans_brelse(tp, bp); return XFS_ERROR(EIO); } - XFS_BUF_BUSY(bp); /* We dirtied this */ } *O_bpp = bp; @@ -622,7 +619,6 @@ xfs_qm_dqread( * this particular dquot was repaired. We still aren't afraid to * brelse it because we have the changes incore. 
*/ - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); xfs_trans_brelse(tp, bp); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 38417ab..9e9b4a7 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -371,7 +371,6 @@ xfs_buf_item_pin( { struct xfs_buf_log_item *bip = BUF_ITEM(lip); - ASSERT(XFS_BUF_ISBUSY(bip->bli_buf)); ASSERT(atomic_read(&bip->bli_refcount) > 0); ASSERT((bip->bli_flags & XFS_BLI_LOGGED) || (bip->bli_flags & XFS_BLI_STALE)); @@ -895,7 +894,6 @@ xfs_buf_attach_iodone( { xfs_log_item_t *head_lip; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); lip->li_cb = cb; diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index d1595e7..64682b6 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1051,7 +1051,6 @@ xlog_alloc_log(xfs_mount_t *mp, if (!bp) goto out_free_log; bp->b_iodone = xlog_iodone; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); log->l_xbuf = bp; @@ -1108,7 +1107,6 @@ xlog_alloc_log(xfs_mount_t *mp, iclog->ic_callback_tail = &(iclog->ic_callback); iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; - ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); ASSERT(xfs_buf_islocked(iclog->ic_bp)); init_waitqueue_head(&iclog->ic_force_wait); init_waitqueue_head(&iclog->ic_write_wait); @@ -1355,7 +1353,6 @@ xlog_sync(xlog_t *log, XFS_BUF_SET_COUNT(bp, count); bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); - XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); bp->b_flags |= XBF_SYNCIO; @@ -1402,7 +1399,6 @@ xlog_sync(xlog_t *log, (__psint_t)count), split); bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); - XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); bp->b_flags |= XBF_SYNCIO; if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index a8e0827..4c8a892 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -178,7 +178,6 @@ xlog_bread_noalign( XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); XFS_BUF_READ(bp); - XFS_BUF_BUSY(bp); XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); @@ -266,7 +265,6 @@ xlog_bwrite( XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); XFS_BUF_ZEROFLAGS(bp); - XFS_BUF_BUSY(bp); XFS_BUF_HOLD(bp); xfs_buf_lock(bp); XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index f9f1bf6..7dd62e2 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -80,7 +80,6 @@ _xfs_trans_bjoin( { struct xfs_buf_log_item *bip; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == NULL); /* @@ -580,7 +579,6 @@ xfs_trans_bhold(xfs_trans_t *tp, { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); @@ -601,7 +599,6 @@ xfs_trans_bhold_release(xfs_trans_t *tp, { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); @@ -630,7 +627,6 @@ xfs_trans_log_buf(xfs_trans_t *tp, { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); @@ -701,7 +697,6 @@ xfs_trans_binval( { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -773,7 +768,6 @@ xfs_trans_inode_buf( { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); 
ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -797,7 +791,6 @@ xfs_trans_stale_inode_buf( { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -822,7 +815,6 @@ xfs_trans_inode_alloc_buf( { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -850,7 +842,6 @@ xfs_trans_dquot_buf( { xfs_buf_log_item_t *bip = bp->b_fspriv; - ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT(type == XFS_BLF_UDQUOT_BUF || -- cgit v0.10.2 From 72790aa1192f46dedfc827c170365fd554981d15 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:04 +0000 Subject: xfs: Remove macro XFS_BUF_HOLD Remove the definition and usage of the macro XFS_BUF_HOLD Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 05e744f..6691a02 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -258,7 +258,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) -#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp) #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 9e9b4a7..a6dd497 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -484,7 +484,7 @@ xfs_buf_item_trylock( return XFS_ITEM_LOCKED; /* take a reference to the buffer. */ - XFS_BUF_HOLD(bp); + xfs_buf_hold(bp); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); trace_xfs_buf_item_trylock(bip); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 4c8a892..536eb0d 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -265,7 +265,7 @@ xlog_bwrite( XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); XFS_BUF_ZEROFLAGS(bp); - XFS_BUF_HOLD(bp); + xfs_buf_hold(bp); xfs_buf_lock(bp); XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 7f25245..b00c808 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1938,7 +1938,7 @@ xfs_getsb( xfs_buf_lock(bp); } - XFS_BUF_HOLD(bp); + xfs_buf_hold(bp); ASSERT(XFS_BUF_ISDONE(bp)); return bp; } -- cgit v0.10.2 From 0095a21eb6ae8ac9f9860aa26029fe6ebbd3beeb Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:09 +0000 Subject: xfs: Remove macro XFS_BUF_SET_START Remove the definition and usage of the macro XFS_BUF_SET_START. 
Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 6691a02..4e8a6ca 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -266,8 +266,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) -#define XFS_BUF_SET_START(bp) do { } while (0) - #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) #define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) #define XFS_BUF_ADDR(bp) ((bp)->b_bn) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a6dd497..bd4c62b 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -994,7 +994,6 @@ xfs_buf_iodone_callbacks( if (!XFS_BUF_ISSTALE(bp)) { XFS_BUF_DELAYWRITE(bp); XFS_BUF_DONE(bp); - XFS_BUF_SET_START(bp); } ASSERT(bp->b_iodone != NULL); trace_xfs_buf_item_iodone_async(bp, _RET_IP_); -- cgit v0.10.2 From 6292604447ade7d150f5eba3b1518e1a224fda15 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:15 +0000 Subject: xfs: Remove the macro XFS_BUF_PTR Remove the definition and usages of the macro XFS_BUF_PTR. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ae2c2e7..6a42f71 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1320,7 +1320,7 @@ xfs_buf_offset( struct page *page; if (bp->b_flags & XBF_MAPPED) - return XFS_BUF_PTR(bp) + offset; + return bp->b_addr + offset; offset += bp->b_offset; page = bp->b_pages[offset >> PAGE_SHIFT]; diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 4e8a6ca..f0aa947 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -266,7 +266,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) -#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) #define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) #define XFS_BUF_ADDR(bp) ((bp)->b_bn) #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 0e12861..2e06292 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -320,7 +320,7 @@ xfs_qm_init_dquot_blk( ASSERT(tp); ASSERT(xfs_buf_islocked(bp)); - d = (xfs_dqblk_t *)XFS_BUF_PTR(bp); + d = bp->b_addr; /* * ID of the first dquot in the block - id's are zero based. @@ -538,7 +538,7 @@ xfs_qm_dqtobp( /* * calculate the location of the dquot inside the buffer. */ - ddq = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset); + ddq = bp->b_addr + dqp->q_bufoffset; /* * A simple sanity check in case we got a corrupted dquot... @@ -1200,7 +1200,7 @@ xfs_qm_dqflush( /* * Calculate the location of the dquot inside the buffer. */ - ddqp = (struct xfs_disk_dquot *)(XFS_BUF_PTR(bp) + dqp->q_bufoffset); + ddqp = bp->b_addr + dqp->q_bufoffset; /* * A simple sanity check in case we got a corrupted dquot.. 
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 46e54ad..9a0aa76 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -1240,7 +1240,7 @@ xfs_qm_reset_dqcounts( do_div(j, sizeof(xfs_dqblk_t)); ASSERT(mp->m_quotainfo->qi_dqperchunk == j); #endif - ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp); + ddq = bp->b_addr; for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { /* * Do a sanity check, and if needed, repair the dqblk. Don't diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 6530769..4805f00 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -103,7 +103,7 @@ typedef struct xfs_agf { /* disk block (xfs_daddr_t) in the AG */ #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) #define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) -#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp)) +#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr)) extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp, xfs_agnumber_t agno, int flags, struct xfs_buf **bpp); @@ -156,7 +156,7 @@ typedef struct xfs_agi { /* disk block (xfs_daddr_t) in the AG */ #define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log)) #define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) -#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp)) +#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr)) extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, xfs_agnumber_t agno, struct xfs_buf **bpp); @@ -168,7 +168,7 @@ extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, #define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log)) #define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp)) #define XFS_AGFL_SIZE(mp) ((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t)) -#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)XFS_BUF_PTR(bp)) +#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr)) typedef struct xfs_agfl { __be32 agfl_bno[1]; /* actually XFS_AGFL_SIZE(mp) */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index c51a3f9..25cb2b2 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -3384,8 +3384,7 @@ xfs_bmap_local_to_extents( ASSERT(args.len == 1); *firstblock = args.fsbno; bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0); - memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data, - ifp->if_bytes); + memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes); xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1); xfs_bmap_forkoff_reset(args.mp, ip, whichfork); xfs_idata_realloc(ip, -ifp->if_bytes, whichfork); diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 8d05a6a..5b240de 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -262,7 +262,7 @@ typedef struct xfs_btree_cur /* * Convert from buffer to btree block header. */ -#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)XFS_BUF_PTR(bp)) +#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)((bp)->b_addr)) /* diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index bd4c62b..a16c24c 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -124,9 +124,9 @@ xfs_buf_item_log_check( bp = bip->bli_buf; ASSERT(XFS_BUF_COUNT(bp) > 0); - ASSERT(XFS_BUF_PTR(bp) != NULL); + ASSERT(bp->b_addr != NULL); orig = bip->bli_orig; - buffer = XFS_BUF_PTR(bp); + buffer = bp->b_addr; for (x = 0; x < XFS_BUF_COUNT(bp); x++) { if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) { xfs_emerg(bp->b_mount, @@ -725,7 +725,7 @@ xfs_buf_item_init( * to have logged. 
*/ bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP); - memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp)); + memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp)); bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP); #endif diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 5d9290d..d56ccb7 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -2258,7 +2258,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps) dabuf->nbuf = 1; bp = bps[0]; dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp)); - dabuf->data = XFS_BUF_PTR(bp); + dabuf->data = bp->b_addr; dabuf->bps[0] = bp; } else { dabuf->nbuf = nbuf; @@ -2269,7 +2269,7 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps) dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP); for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) { bp = bps[i]; - memcpy((char *)dabuf->data + off, XFS_BUF_PTR(bp), + memcpy((char *)dabuf->data + off, bp->b_addr, XFS_BUF_COUNT(bp)); } } @@ -2292,8 +2292,8 @@ xfs_da_buf_clean(xfs_dabuf_t *dabuf) for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) { bp = dabuf->bps[i]; - memcpy(XFS_BUF_PTR(bp), (char *)dabuf->data + off, - XFS_BUF_COUNT(bp)); + memcpy(bp->b_addr, dabuf->data + off, + XFS_BUF_COUNT(bp)); } } } @@ -2330,7 +2330,7 @@ xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last) ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); if (dabuf->nbuf == 1) { - ASSERT(dabuf->data == (void *)XFS_BUF_PTR(dabuf->bps[0])); + ASSERT(dabuf->data == dabuf->bps[0]->b_addr); xfs_trans_log_buf(tp, dabuf->bps[0], first, last); return; } diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index dffba9b..a372163 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -148,7 +148,7 @@ typedef enum xfs_dinode_fmt { be32_to_cpu((dip)->di_nextents) : \ be16_to_cpu((dip)->di_anextents)) -#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) +#define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)((bp)->b_addr)) /* * For block and character special files the 32bit dev_t is stored at the diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 64682b6..3f1fa7b 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1403,7 +1403,7 @@ xlog_sync(xlog_t *log, bp->b_flags |= XBF_SYNCIO; if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) bp->b_flags |= XBF_FUA; - dptr = XFS_BUF_PTR(bp); + dptr = bp->b_addr; /* * Bump the cycle numbers at the start of each block * since this part of the buffer is at the start of diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 536eb0d..1ac295d 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -147,7 +147,7 @@ xlog_align( xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1); ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp)); - return XFS_BUF_PTR(bp) + BBTOB(offset); + return bp->b_addr + BBTOB(offset); } @@ -219,7 +219,7 @@ xlog_bread_offset( xfs_buf_t *bp, xfs_caddr_t offset) { - xfs_caddr_t orig_offset = XFS_BUF_PTR(bp); + xfs_caddr_t orig_offset = bp->b_addr; int orig_len = bp->b_buffer_length; int error, error2; @@ -1260,7 +1260,7 @@ xlog_write_log_records( */ ealign = round_down(end_block, sectbb); if (j == 0 && (start_block + endcount > ealign)) { - offset = XFS_BUF_PTR(bp) + BBTOB(ealign - start_block); + offset = bp->b_addr + BBTOB(ealign - start_block); error = xlog_bread_offset(log, ealign, sectbb, bp, offset); if (error) @@ -3433,7 +3433,7 @@ xlog_do_recovery_pass( /* * Check for header wrapping around physical end-of-log 
*/ - offset = XFS_BUF_PTR(hbp); + offset = hbp->b_addr; split_hblks = 0; wrapped_hblks = 0; if (blk_no + hblks <= log->l_logBBsize) { @@ -3493,7 +3493,7 @@ xlog_do_recovery_pass( } else { /* This log record is split across the * physical end of log */ - offset = XFS_BUF_PTR(dbp); + offset = dbp->b_addr; split_bblks = 0; if (blk_no != log->l_logBBsize) { /* some data is before the physical diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index cb8132c..35561a5 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -168,7 +168,7 @@ error_cancel: xfs_trans_cancel(tp, cancelflags); goto error; } - memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); + memset(bp->b_addr, 0, mp->m_sb.sb_blocksize); xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); /* * Commit the transaction. @@ -943,7 +943,7 @@ xfs_rtcheck_range( if (error) { return error; } - bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + bufp = bp->b_addr; /* * Compute the starting word's address, and starting bit. */ @@ -994,7 +994,7 @@ xfs_rtcheck_range( if (error) { return error; } - b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + b = bufp = bp->b_addr; word = 0; } else { /* @@ -1040,7 +1040,7 @@ xfs_rtcheck_range( if (error) { return error; } - b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + b = bufp = bp->b_addr; word = 0; } else { /* @@ -1158,7 +1158,7 @@ xfs_rtfind_back( if (error) { return error; } - bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + bufp = bp->b_addr; /* * Get the first word's index & point to it. */ @@ -1210,7 +1210,7 @@ xfs_rtfind_back( if (error) { return error; } - bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + bufp = bp->b_addr; word = XFS_BLOCKWMASK(mp); b = &bufp[word]; } else { @@ -1256,7 +1256,7 @@ xfs_rtfind_back( if (error) { return error; } - bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + bufp = bp->b_addr; word = XFS_BLOCKWMASK(mp); b = &bufp[word]; } else { @@ -1333,7 +1333,7 @@ xfs_rtfind_forw( if (error) { return error; } - bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + bufp = bp->b_addr; /* * Get the first word's index & point to it. */ @@ -1384,7 +1384,7 @@ xfs_rtfind_forw( if (error) { return error; } - b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + b = bufp = bp->b_addr; word = 0; } else { /* @@ -1429,7 +1429,7 @@ xfs_rtfind_forw( if (error) { return error; } - b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + b = bufp = bp->b_addr; word = 0; } else { /* @@ -1649,7 +1649,7 @@ xfs_rtmodify_range( if (error) { return error; } - bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + bufp = bp->b_addr; /* * Compute the starting word's address, and starting bit. 
*/ @@ -1694,7 +1694,7 @@ xfs_rtmodify_range( if (error) { return error; } - first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + first = b = bufp = bp->b_addr; word = 0; } else { /* @@ -1734,7 +1734,7 @@ xfs_rtmodify_range( if (error) { return error; } - first = b = bufp = (xfs_rtword_t *)XFS_BUF_PTR(bp); + first = b = bufp = bp->b_addr; word = 0; } else { /* @@ -1832,8 +1832,8 @@ xfs_rtmodify_summary( */ sp = XFS_SUMPTR(mp, bp, so); *sp += delta; - xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)XFS_BUF_PTR(bp)), - (uint)((char *)sp - (char *)XFS_BUF_PTR(bp) + sizeof(*sp) - 1)); + xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr), + (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1)); return 0; } diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h index 09e1f4f..f7f3a35 100644 --- a/fs/xfs/xfs_rtalloc.h +++ b/fs/xfs/xfs_rtalloc.h @@ -47,7 +47,7 @@ struct xfs_trans; #define XFS_SUMOFFSTOBLOCK(mp,s) \ (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog) #define XFS_SUMPTR(mp,bp,so) \ - ((xfs_suminfo_t *)((char *)XFS_BUF_PTR(bp) + \ + ((xfs_suminfo_t *)((bp)->b_addr + \ (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp)))) #define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log) diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index 1eb2ba5..cb6ae71 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h @@ -509,7 +509,7 @@ static inline int xfs_sb_version_hasprojid32bit(xfs_sb_t *sbp) #define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ #define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR) -#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)XFS_BUF_PTR(bp)) +#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr)) #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 3baebe2..3ee5f8a 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -94,7 +94,7 @@ xfs_readlink_bmap( byte_cnt = pathlen; pathlen -= byte_cnt; - memcpy(link, XFS_BUF_PTR(bp), byte_cnt); + memcpy(link, bp->b_addr, byte_cnt); xfs_buf_relse(bp); } @@ -1654,7 +1654,7 @@ xfs_symlink( } pathlen -= byte_cnt; - memcpy(XFS_BUF_PTR(bp), cur_chunk, byte_cnt); + memcpy(bp->b_addr, cur_chunk, byte_cnt); cur_chunk += byte_cnt; xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1); @@ -1999,7 +1999,7 @@ xfs_zero_remaining_bytes( mp, bp, XFS_BUF_ADDR(bp)); break; } - memset(XFS_BUF_PTR(bp) + + memset(bp->b_addr + (offset - XFS_FSB_TO_B(mp, imap.br_startoff)), 0, lastoffset - offset + 1); XFS_BUF_UNDONE(bp); -- cgit v0.10.2 From 02fe03d909f3a5876d7b4775fdbc83c07c7c3842 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:22 +0000 Subject: xfs: Remove the macro XFS_BUF_SET_PTR Remove the definition and usages of the macro XFS_BUF_SET_PTR. 
Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index f0aa947..c5601e1 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -266,7 +266,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) -#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) #define XFS_BUF_ADDR(bp) ((bp)->b_bn) #define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) #define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 3f1fa7b..3a8d4f6 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1395,8 +1395,8 @@ xlog_sync(xlog_t *log, if (split) { bp = iclog->ic_log->l_xbuf; XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ - XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ - (__psint_t)count), split); + xfs_buf_associate_memory(bp, + (char *)&iclog->ic_header + count, split); bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); XFS_BUF_ASYNC(bp); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 1ac295d..aaf61d5 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -223,14 +223,14 @@ xlog_bread_offset( int orig_len = bp->b_buffer_length; int error, error2; - error = XFS_BUF_SET_PTR(bp, offset, BBTOB(nbblks)); + error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks)); if (error) return error; error = xlog_bread_noalign(log, blk_no, nbblks, bp); /* must reset buffer pointer even on error */ - error2 = XFS_BUF_SET_PTR(bp, orig_offset, orig_len); + error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len); if (error) return error; return error2; -- cgit v0.10.2 From 811e64c7169bb59229971c4aa3b1ed5093f44c84 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:27 +0000 Subject: Replace the macro XFS_BUF_ISPINNED with helper xfs_buf_ispinned Replace the macro XFS_BUF_ISPINNED with an inline helper function xfs_buf_ispinned, and change all its usages. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 6a42f71..5e929f0 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1677,7 +1677,7 @@ xfs_buf_delwri_split( list_for_each_entry_safe(bp, n, dwq, b_list) { ASSERT(bp->b_flags & XBF_DELWRI); - if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) { + if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) { if (!force && time_before(jiffies, bp->b_queuetime + age)) { xfs_buf_unlock(bp); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index c5601e1..f4e3de6 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -285,7 +285,10 @@ xfs_buf_set_ref( #define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref) #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) -#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count)) +static inline int xfs_buf_ispinned(struct xfs_buf *bp) +{ + return atomic_read(&bp->b_pin_count); +} #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 5cc158e..a8500e9 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -330,7 +330,7 @@ xfs_sync_fsdata( * between there and here. 
*/ bp = xfs_getsb(mp, 0); - if (XFS_BUF_ISPINNED(bp)) + if (xfs_buf_ispinned(bp)) xfs_log_force(mp, 0); return xfs_bwrite(mp, bp); diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 2e06292..db62959 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -1236,7 +1236,7 @@ xfs_qm_dqflush( * If the buffer is pinned then push on the log so we won't * get stuck waiting in the write for too long. */ - if (XFS_BUF_ISPINNED(bp)) { + if (xfs_buf_ispinned(bp)) { trace_xfs_dqflush_force(dqp); xfs_log_force(mp, 0); } @@ -1443,7 +1443,7 @@ xfs_qm_dqflock_pushbuf_wait( goto out_lock; if (XFS_BUF_ISDELAYWRITE(bp)) { - if (XFS_BUF_ISPINNED(bp)) + if (xfs_buf_ispinned(bp)) xfs_log_force(mp, 0); xfs_buf_delwri_promote(bp); wake_up_process(bp->b_target->bt_task); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a16c24c..a3d2bbc 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -478,7 +478,7 @@ xfs_buf_item_trylock( struct xfs_buf_log_item *bip = BUF_ITEM(lip); struct xfs_buf *bp = bip->bli_buf; - if (XFS_BUF_ISPINNED(bp)) + if (xfs_buf_ispinned(bp)) return XFS_ITEM_PINNED; if (!xfs_buf_trylock(bp)) return XFS_ITEM_LOCKED; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index bdb47b2..76ee2c5 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2585,7 +2585,7 @@ xfs_iflush( * If the buffer is pinned then push on the log now so we won't * get stuck waiting in the write for too long. */ - if (XFS_BUF_ISPINNED(bp)) + if (xfs_buf_ispinned(bp)) xfs_log_force(mp, 0); /* -- cgit v0.10.2 From e38c9b87e5b428b3e2a2e48ab0ee2b6cdc8e6208 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:33 +0000 Subject: xfs: Remove the macro XFS_BUF_SET_TARGET Remove the macro XFS_BUF_SET_TARGET. hch: As all the buffer allocator already set ->b_target it should be safe to simply remove these calls. 
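hch's remark holds because, in this era, every xfs_buf comes out of a common initializer that records the target when the buffer is created, so a later XFS_BUF_SET_TARGET() only re-stores a value that is already set. A minimal sketch of that allocation-side initialization, modeled loosely on _xfs_buf_initialize() in fs/xfs/linux-2.6/xfs_buf.c (the exact shape shown here is an assumption, not a quote from this patch):

	static void
	_xfs_buf_initialize(
		struct xfs_buf		*bp,
		struct xfs_buftarg	*target,
		xfs_off_t		range_base,
		size_t			range_length,
		xfs_buf_flags_t		flags)
	{
		memset(bp, 0, sizeof(struct xfs_buf));
		atomic_set(&bp->b_hold, 1);
		bp->b_target = target;	/* target is fixed at allocation */
		/* ... remaining field setup elided ... */
	}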
Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index f4e3de6..6a38b2d 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -292,7 +292,6 @@ static inline int xfs_buf_ispinned(struct xfs_buf *bp) #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); -#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) #define XFS_BUF_TARGET(bp) ((bp)->b_target) #define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target) diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index aaf61d5..93786e5 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -179,7 +179,6 @@ xlog_bread_noalign( XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no); XFS_BUF_READ(bp); XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); - XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); xfsbdstrat(log->l_mp, bp); error = xfs_buf_iowait(bp); @@ -268,7 +267,6 @@ xlog_bwrite( xfs_buf_hold(bp); xfs_buf_lock(bp); XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); - XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); if ((error = xfs_bwrite(log->l_mp, bp))) xfs_ioerror_alert("xlog_bwrite", log->l_mp, -- cgit v0.10.2 From 49074c069cd3f0f683325d0c7f8b2765dbe2e294 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:40 +0000 Subject: xfs: Remove the macro XFS_BUF_TARGET Remove the definition and usages of the macro XFS_BUF_TARGET Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 6a38b2d..4b30499 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -292,7 +292,6 @@ static inline int xfs_buf_ispinned(struct xfs_buf *bp) #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); -#define XFS_BUF_TARGET(bp) ((bp)->b_target) #define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target) static inline void xfs_buf_relse(xfs_buf_t *bp) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index a3d2bbc..5c2b554 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -971,14 +971,14 @@ xfs_buf_iodone_callbacks( goto do_callbacks; } - if (XFS_BUF_TARGET(bp) != lasttarg || + if (bp->b_target != lasttarg || time_after(jiffies, (lasttime + 5*HZ))) { lasttime = jiffies; xfs_alert(mp, "Device %s: metadata write error block 0x%llx", - XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), + XFS_BUFTARG_NAME(bp->b_target), (__uint64_t)XFS_BUF_ADDR(bp)); } - lasttarg = XFS_BUF_TARGET(bp); + lasttarg = bp->b_target; /* * If the write was asynchronous then no one will be looking for the diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index b00c808..49ecc17 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1615,7 +1615,7 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) XFS_BUF_UNDELAYWRITE(sbp); XFS_BUF_WRITE(sbp); XFS_BUF_UNASYNC(sbp); - ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp); + ASSERT(sbp->b_target == mp->m_ddev_targp); xfsbdstrat(mp, sbp); error = xfs_buf_iowait(sbp); if (error) diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index d1f76f8..7382bfe 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -104,7 +104,7 @@ xfs_ioerror_alert( xfs_alert(mp, "I/O error occurred: meta-data dev %s block 0x%llx" " (\"%s\") error %d buf count %zd", - XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), + XFS_BUFTARG_NAME(bp->b_target), (__uint64_t)blkno, func, bp->b_error, XFS_BUF_COUNT(bp)); } diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c 
index 7dd62e2..137e2b9 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -54,7 +54,7 @@ xfs_trans_buf_item_match( list_for_each_entry(lidp, &tp->t_items, lid_trans) { blip = (struct xfs_buf_log_item *)lidp->lid_item; if (blip->bli_item.li_type == XFS_LI_BUF && - XFS_BUF_TARGET(blip->bli_buf) == target && + blip->bli_buf->b_target == target && XFS_BUF_ADDR(blip->bli_buf) == blkno && XFS_BUF_COUNT(blip->bli_buf) == len) return blip->bli_buf; -- cgit v0.10.2 From c35a549c8b9e85bdff7e531a410d10e36b4b4f32 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Fri, 22 Jul 2011 23:40:46 +0000 Subject: xfs: Remove the macro XFS_BUFTARG_NAME Remove the definition and usages of the macro XFS_BUFTARG_NAME. Signed-off-by: Chandra Seetharaman Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 5e929f0..6bddce4 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1480,7 +1480,7 @@ xfs_setsize_buftarg_flags( if (set_blocksize(btp->bt_bdev, sectorsize)) { xfs_warn(btp->bt_mount, "Cannot set_blocksize to %u on device %s\n", - sectorsize, XFS_BUFTARG_NAME(btp)); + sectorsize, xfs_buf_target_name(btp)); return EINVAL; } diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 4b30499..620972b 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -228,8 +228,13 @@ extern void xfs_buf_delwri_promote(xfs_buf_t *); extern int xfs_buf_init(void); extern void xfs_buf_terminate(void); -#define xfs_buf_target_name(target) \ - ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; }) +static inline const char * +xfs_buf_target_name(struct xfs_buftarg *target) +{ + static char __b[BDEVNAME_SIZE]; + + return bdevname(target->bt_bdev, __b); +} #define XFS_BUF_ZEROFLAGS(bp) \ @@ -292,8 +297,6 @@ static inline int xfs_buf_ispinned(struct xfs_buf *bp) #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); -#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target) - static inline void xfs_buf_relse(xfs_buf_t *bp) { xfs_buf_unlock(bp); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 5c2b554..0402173 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -975,7 +975,7 @@ xfs_buf_iodone_callbacks( time_after(jiffies, (lasttime + 5*HZ))) { lasttime = jiffies; xfs_alert(mp, "Device %s: metadata write error block 0x%llx", - XFS_BUFTARG_NAME(bp->b_target), + xfs_buf_target_name(bp->b_target), (__uint64_t)XFS_BUF_ADDR(bp)); } lasttarg = bp->b_target; diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index 7382bfe..c96a8a0 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -104,7 +104,7 @@ xfs_ioerror_alert( xfs_alert(mp, "I/O error occurred: meta-data dev %s block 0x%llx" " (\"%s\") error %d buf count %zd", - XFS_BUFTARG_NAME(bp->b_target), + xfs_buf_target_name(bp->b_target), (__uint64_t)blkno, func, bp->b_error, XFS_BUF_COUNT(bp)); } -- cgit v0.10.2 From 3e9f45a7a4179604ccbae1589de0e7165bc6fcd0 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 25 Jul 2011 17:13:27 -0300 Subject: perf python: Add PERF_RECORD_{LOST,READ,SAMPLE} routine tables So those friggin "spurious" PERF_RECORD_MMAP events were actually a brain fart copy'n'paste error in the python binding, doh. I.e. they weren't MMAPs, just SAMPLEs. Fix it by providing routines for these events instead of using the MMAP ones. 
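The fix works because event construction in the binding is table-driven: pyrf_event__new() simply indexes pyrf_event__type[] by the record type, so pointing PERF_RECORD_{LOST,READ,SAMPLE} at their own PyTypeObject entries is the whole change. A rough sketch of that consuming side, modeled on the pyrf_event__new() whose declaration closes the diff below (the body shown is an assumption, not part of this patch):

	static PyObject *pyrf_event__new(union perf_event *event)
	{
		struct pyrf_event *pevent;
		PyTypeObject *ptype;

		if (event->header.type < PERF_RECORD_MMAP ||
		    event->header.type > PERF_RECORD_SAMPLE)
			return NULL;

		/* dispatch on the record type via the table */
		ptype = pyrf_event__type[event->header.type];
		pevent = PyObject_New(struct pyrf_event, ptype);
		if (pevent != NULL)
			memcpy(&pevent->event, event, event->header.size);
		return (PyObject *)pevent;
	}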
Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-b0rc8y5jd03f9f11kftodvkm@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 8e0b5a3..cbc8f21 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -187,16 +187,119 @@ static PyTypeObject pyrf_throttle_event__type = { .tp_repr = (reprfunc)pyrf_throttle_event__repr, }; +static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object."); + +static PyMemberDef pyrf_lost_event__members[] = { + sample_members + member_def(lost_event, id, T_ULONGLONG, "event id"), + member_def(lost_event, lost, T_ULONGLONG, "number of lost events"), + { .name = NULL, }, +}; + +static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent) +{ + PyObject *ret; + char *s; + + if (asprintf(&s, "{ type: lost, id: %#" PRIx64 ", " + "lost: %#" PRIx64 " }", + pevent->event.lost.id, pevent->event.lost.lost) < 0) { + ret = PyErr_NoMemory(); + } else { + ret = PyString_FromString(s); + free(s); + } + return ret; +} + +static PyTypeObject pyrf_lost_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.lost_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_lost_event__doc, + .tp_members = pyrf_lost_event__members, + .tp_repr = (reprfunc)pyrf_lost_event__repr, +}; + +static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object."); + +static PyMemberDef pyrf_read_event__members[] = { + sample_members + member_def(read_event, pid, T_UINT, "event pid"), + member_def(read_event, tid, T_UINT, "event tid"), + { .name = NULL, }, +}; + +static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent) +{ + return PyString_FromFormat("{ type: read, pid: %u, tid: %u }", + pevent->event.read.pid, + pevent->event.read.tid); + /* + * FIXME: return the array of read values, + * making this method useful ;-) + */ +} + +static PyTypeObject pyrf_read_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.read_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_read_event__doc, + .tp_members = pyrf_read_event__members, + .tp_repr = (reprfunc)pyrf_read_event__repr, +}; + +static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object."); + +static PyMemberDef pyrf_sample_event__members[] = { + sample_members + member_def(perf_event_header, type, T_UINT, "event type"), + { .name = NULL, }, +}; + +static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent) +{ + PyObject *ret; + char *s; + + if (asprintf(&s, "{ type: sample }") < 0) { + ret = PyErr_NoMemory(); + } else { + ret = PyString_FromString(s); + free(s); + } + return ret; +} + +static PyTypeObject pyrf_sample_event__type = { + PyVarObject_HEAD_INIT(NULL, 0) + .tp_name = "perf.sample_event", + .tp_basicsize = sizeof(struct pyrf_event), + .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, + .tp_doc = pyrf_sample_event__doc, + .tp_members = pyrf_sample_event__members, + .tp_repr = (reprfunc)pyrf_sample_event__repr, +}; + static int pyrf_event__setup_types(void) { int err; pyrf_mmap_event__type.tp_new = pyrf_task_event__type.tp_new = pyrf_comm_event__type.tp_new = + pyrf_lost_event__type.tp_new = + pyrf_read_event__type.tp_new = + pyrf_sample_event__type.tp_new = pyrf_throttle_event__type.tp_new = PyType_GenericNew; err = 
PyType_Ready(&pyrf_mmap_event__type); if (err < 0) goto out; + err = PyType_Ready(&pyrf_lost_event__type); + if (err < 0) + goto out; err = PyType_Ready(&pyrf_task_event__type); if (err < 0) goto out; @@ -206,20 +309,26 @@ static int pyrf_event__setup_types(void) err = PyType_Ready(&pyrf_throttle_event__type); if (err < 0) goto out; + err = PyType_Ready(&pyrf_read_event__type); + if (err < 0) + goto out; + err = PyType_Ready(&pyrf_sample_event__type); + if (err < 0) + goto out; out: return err; } static PyTypeObject *pyrf_event__type[] = { [PERF_RECORD_MMAP] = &pyrf_mmap_event__type, - [PERF_RECORD_LOST] = &pyrf_mmap_event__type, + [PERF_RECORD_LOST] = &pyrf_lost_event__type, [PERF_RECORD_COMM] = &pyrf_comm_event__type, [PERF_RECORD_EXIT] = &pyrf_task_event__type, [PERF_RECORD_THROTTLE] = &pyrf_throttle_event__type, [PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type, [PERF_RECORD_FORK] = &pyrf_task_event__type, - [PERF_RECORD_READ] = &pyrf_mmap_event__type, - [PERF_RECORD_SAMPLE] = &pyrf_mmap_event__type, + [PERF_RECORD_READ] = &pyrf_read_event__type, + [PERF_RECORD_SAMPLE] = &pyrf_sample_event__type, }; static PyObject *pyrf_event__new(union perf_event *event) -- cgit v0.10.2 From c86566bbb214706c4820d93843fd4c77f4ecd082 Mon Sep 17 00:00:00 2001 From: Javier Martin Date: Tue, 7 Jun 2011 17:37:18 +0200 Subject: Add tlv320aic32x4 platform data to Visstrim_M10. Without this platform data the aic32x4 audio codec in the Visstrim_M10 won't work properly. Signed-off-by: Javier Martin Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index 7ae43b1..b8e3b58 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -196,6 +197,17 @@ static struct pca953x_platform_data visstrim_m10_pca9555_pdata = { .invert = 0, }; +static struct aic32x4_pdata visstrim_m10_aic32x4_pdata = { + .power_cfg = AIC32X4_PWR_MICBIAS_2075_LDOIN | + AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE | + AIC32X4_PWR_AIC32X4_LDO_ENABLE | + AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36 | + AIC32X4_PWR_CMMODE_HP_LDOIN_POWERED, + .micpga_routing = AIC32X4_MICPGA_ROUTE_LMIC_IN2R_10K | + AIC32X4_MICPGA_ROUTE_RMIC_IN1L_10K, + .swapdacs = false, +}; + static struct i2c_board_info visstrim_m10_i2c_devices[] = { { I2C_BOARD_INFO("pca9555", 0x20), @@ -203,6 +215,7 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = { }, { I2C_BOARD_INFO("tlv320aic32x4", 0x18), + .platform_data = &visstrim_m10_aic32x4_pdata, } }; -- cgit v0.10.2 From ea7aed6bfba3b0638de6f657ab831f1d94b97103 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 14 Jun 2011 15:42:49 -0300 Subject: ARM: mach-imx/mx31ads: Fix section mismatches Fix the following section mismatches: WARNING: vmlinux.o(.text+0x101cc): Section mismatch in reference from the function mxc_init_i2c() to the (unknown reference) .init.data:(unknown) The function mxc_init_i2c() references the (unknown reference) __initdata (unknown). This is often because mxc_init_i2c lacks a __initdata annotation or the annotation of (unknown) is wrong. WARNING: vmlinux.o(.text+0x101d8): Section mismatch in reference from the function mxc_init_i2c() to the variable .init.rodata:imx31_imx_i2c_data The function mxc_init_i2c() references the variable __initconst imx31_imx_i2c_data. This is often because mxc_init_i2c lacks a __initconst annotation or the annotation of imx31_imx_i2c_data is wrong. 
WARNING: vmlinux.o(.text+0x10200): Section mismatch in reference from the function mxc_init_audio() to the variable .init.rodata:imx31_imx_ssi_data The function mxc_init_audio() references the variable __initconst imx31_imx_ssi_data. This is often because mxc_init_audio lacks a __initconst annotation or the annotation of imx31_imx_ssi_data is wrong. Signed-off-by: Fabio Estevam Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c index f4dee02..2ce3af8 100644 --- a/arch/arm/mach-imx/mach-mx31ads.c +++ b/arch/arm/mach-imx/mach-mx31ads.c @@ -468,7 +468,7 @@ static struct i2c_board_info __initdata mx31ads_i2c1_devices[] = { #endif }; -static void mxc_init_i2c(void) +static void __init mxc_init_i2c(void) { i2c_register_board_info(1, mx31ads_i2c1_devices, ARRAY_SIZE(mx31ads_i2c1_devices)); @@ -486,7 +486,7 @@ static unsigned int ssi_pins[] = { MX31_PIN_STXD5__STXD5, }; -static void mxc_init_audio(void) +static void __init mxc_init_audio(void) { imx31_add_imx_ssi(0, NULL); mxc_iomux_setup_multiple_pins(ssi_pins, ARRAY_SIZE(ssi_pins), "ssi"); -- cgit v0.10.2 From f07c7d6730fa65414f0becf2c50ca1c9ffac9e96 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 14 Jun 2011 15:42:50 -0300 Subject: ARM: mach-imx/mx31lilly: Fix section mismatches Fix the following section mismatches: WARNING: vmlinux.o(.text+0x1026c): Section mismatch in reference from the function lilly1131_usb_init() to the variable .init.rodata:imx31_mxc_ehci_hs_data The function lilly1131_usb_init() references the variable __initconst imx31_mxc_ehci_hs_data. This is often because lilly1131_usb_init lacks a __initconst annotation or the annotation of imx31_mxc_ehci_hs_data is wrong. WARNING: vmlinux.o(.text+0x10270): Section mismatch in reference from the function lilly1131_usb_init() to the (unknown reference) .init.rodata:(unknown) The function lilly1131_usb_init() references the (unknown reference) __initconst (unknown). This is often because lilly1131_usb_init lacks a __initconst annotation or the annotation of (unknown) is wrong. WARNING: vmlinux.o(.text+0x10274): Section mismatch in reference from the function lilly1131_usb_init() to the (unknown reference) .init.data:(unknown) The function lilly1131_usb_init() references the (unknown reference) __initdata (unknown). This is often because lilly1131_usb_init lacks a __initdata annotation or the annotation of (unknown) is wrong. Signed-off-by: Fabio Estevam Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c index 410e676..4bf1deb 100644 --- a/arch/arm/mach-imx/mach-mx31lilly.c +++ b/arch/arm/mach-imx/mach-mx31lilly.c @@ -192,7 +192,7 @@ static struct mxc_usbh_platform_data usbh2_pdata __initdata = { .portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT, }; -static void lilly1131_usb_init(void) +static void __init lilly1131_usb_init(void) { imx31_add_mxc_ehci_hs(1, &usbh1_pdata); -- cgit v0.10.2 From 4e606d68d2939128a15f56cbf546d44872230aad Mon Sep 17 00:00:00 2001 From: Eric Miao Date: Tue, 14 Jun 2011 15:19:47 +0800 Subject: ARM: mx53: add pad configuration for I2C signals I2C signals SDA/SCL are a bit different, they need to be pull-up and open-drain (so to support multiple I2C devices). 
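I2C is a wired-AND bus: every device only ever drives SDA/SCL low and the shared pull-up supplies the high level, so a push-pull pad would fight a clock-stretching slave or a second master. A hedged sketch of how a board file would apply such pads; the pad list and function name below are illustrative, not taken from a real board:

        #include <linux/init.h>
        #include <linux/kernel.h>
        #include <mach/iomux-mx53.h>

        /* both pads carry MUX_PAD_CTRL(PAD_CTRL_I2C) after this patch */
        static iomux_v3_cfg_t example_i2c_pads[] = {
                MX53_PAD_KEY_COL3__I2C2_SCL,
                MX53_PAD_KEY_ROW3__I2C2_SDA,
        };

        static void __init example_board_init(void)
        {
                mxc_iomux_v3_setup_multiple_pads(example_i2c_pads,
                                                 ARRAY_SIZE(example_i2c_pads));
        }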
Signed-off-by: Richard Zhao Signed-off-by: Eric Miao Signed-off-by: Sascha Hauer diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx53.h b/arch/arm/plat-mxc/include/mach/iomux-mx53.h index e95d9cb..468674f 100644 --- a/arch/arm/plat-mxc/include/mach/iomux-mx53.h +++ b/arch/arm/plat-mxc/include/mach/iomux-mx53.h @@ -30,6 +30,9 @@ #define MX53_SDHC_PAD_CTRL (PAD_CTL_HYS | PAD_CTL_PKE | PAD_CTL_PUE | \ PAD_CTL_PUS_47K_UP | PAD_CTL_DSE_HIGH | \ PAD_CTL_SRE_FAST) +#define PAD_CTRL_I2C (PAD_CTL_SRE_FAST | PAD_CTL_ODE | PAD_CTL_PKE | \ + PAD_CTL_PUE | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP \ + | PAD_CTL_HYS) #define _MX53_PAD_GPIO_19__KPP_COL_5 IOMUX_PAD(0x348, 0x20, 0, 0x840, 0, 0) #define _MX53_PAD_GPIO_19__GPIO4_5 IOMUX_PAD(0x348, 0x20, 1, 0x0, 0, 0) @@ -1256,7 +1259,7 @@ #define MX53_PAD_KEY_COL3__GPIO4_12 (_MX53_PAD_KEY_COL3__GPIO4_12 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_COL3__USBOH3_H2_DP (_MX53_PAD_KEY_COL3__USBOH3_H2_DP | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_COL3__SPDIF_IN1 (_MX53_PAD_KEY_COL3__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_KEY_COL3__I2C2_SCL (_MX53_PAD_KEY_COL3__I2C2_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_KEY_COL3__ECSPI1_SS3 (_MX53_PAD_KEY_COL3__ECSPI1_SS3 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_COL3__FEC_CRS (_MX53_PAD_KEY_COL3__FEC_CRS | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK (_MX53_PAD_KEY_COL3__USBPHY1_SIECLOCK | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -1264,7 +1267,7 @@ #define MX53_PAD_KEY_ROW3__GPIO4_13 (_MX53_PAD_KEY_ROW3__GPIO4_13 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_ROW3__USBOH3_H2_DM (_MX53_PAD_KEY_ROW3__USBOH3_H2_DM | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK (_MX53_PAD_KEY_ROW3__CCM_ASRC_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_KEY_ROW3__I2C2_SDA (_MX53_PAD_KEY_ROW3__I2C2_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_KEY_ROW3__OSC32K_32K_OUT (_MX53_PAD_KEY_ROW3__OSC32K_32K_OUT | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_ROW3__CCM_PLL4_BYP (_MX53_PAD_KEY_ROW3__CCM_PLL4_BYP | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 (_MX53_PAD_KEY_ROW3__USBPHY1_LINESTATE_0 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -1536,7 +1539,7 @@ #define MX53_PAD_CSI0_DAT8__KPP_COL_7 (_MX53_PAD_CSI0_DAT8__KPP_COL_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT8__ECSPI2_SCLK (_MX53_PAD_CSI0_DAT8__ECSPI2_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC (_MX53_PAD_CSI0_DAT8__USBOH3_USBH3_OC | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_CSI0_DAT8__I2C1_SDA (_MX53_PAD_CSI0_DAT8__I2C1_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 (_MX53_PAD_CSI0_DAT8__EMI_EMI_DEBUG_37 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 (_MX53_PAD_CSI0_DAT8__TPIU_TRACE_5 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 (_MX53_PAD_CSI0_DAT9__IPU_CSI0_D_9 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -1544,7 +1547,7 @@ #define MX53_PAD_CSI0_DAT9__KPP_ROW_7 (_MX53_PAD_CSI0_DAT9__KPP_ROW_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT9__ECSPI2_MOSI (_MX53_PAD_CSI0_DAT9__ECSPI2_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR 
(_MX53_PAD_CSI0_DAT9__USBOH3_USBH3_PWR | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_CSI0_DAT9__I2C1_SCL (_MX53_PAD_CSI0_DAT9__I2C1_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 (_MX53_PAD_CSI0_DAT9__EMI_EMI_DEBUG_38 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 (_MX53_PAD_CSI0_DAT9__TPIU_TRACE_6 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 (_MX53_PAD_CSI0_DAT10__IPU_CSI0_D_10 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -1631,25 +1634,25 @@ #define MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK (_MX53_PAD_EIM_EB2__CCM_DI1_EXT_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS (_MX53_PAD_EIM_EB2__IPU_SER_DISP1_CS | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_EB2__ECSPI1_SS0 (_MX53_PAD_EIM_EB2__ECSPI1_SS0 | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_EIM_EB2__I2C2_SCL (_MX53_PAD_EIM_EB2__I2C2_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_EIM_D16__EMI_WEIM_D_16 (_MX53_PAD_EIM_D16__EMI_WEIM_D_16 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D16__GPIO3_16 (_MX53_PAD_EIM_D16__GPIO3_16 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D16__IPU_DI0_PIN5 (_MX53_PAD_EIM_D16__IPU_DI0_PIN5 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK (_MX53_PAD_EIM_D16__IPU_DISPB1_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D16__ECSPI1_SCLK (_MX53_PAD_EIM_D16__ECSPI1_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_EIM_D16__I2C2_SDA (_MX53_PAD_EIM_D16__I2C2_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_EIM_D17__EMI_WEIM_D_17 (_MX53_PAD_EIM_D17__EMI_WEIM_D_17 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D17__GPIO3_17 (_MX53_PAD_EIM_D17__GPIO3_17 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D17__IPU_DI0_PIN6 (_MX53_PAD_EIM_D17__IPU_DI0_PIN6 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN (_MX53_PAD_EIM_D17__IPU_DISPB1_SER_DIN | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D17__ECSPI1_MISO (_MX53_PAD_EIM_D17__ECSPI1_MISO | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_EIM_D17__I2C3_SCL (_MX53_PAD_EIM_D17__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_EIM_D18__EMI_WEIM_D_18 (_MX53_PAD_EIM_D18__EMI_WEIM_D_18 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D18__GPIO3_18 (_MX53_PAD_EIM_D18__GPIO3_18 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D18__IPU_DI0_PIN7 (_MX53_PAD_EIM_D18__IPU_DI0_PIN7 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO (_MX53_PAD_EIM_D18__IPU_DISPB1_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D18__ECSPI1_MOSI (_MX53_PAD_EIM_D18__ECSPI1_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_EIM_D18__I2C3_SDA (_MX53_PAD_EIM_D18__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_EIM_D18__IPU_DI1_D0_CS (_MX53_PAD_EIM_D18__IPU_DI1_D0_CS | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D19__EMI_WEIM_D_19 (_MX53_PAD_EIM_D19__EMI_WEIM_D_19 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D19__GPIO3_19 (_MX53_PAD_EIM_D19__GPIO3_19 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -1672,7 +1675,7 @@ #define 
MX53_PAD_EIM_D21__IPU_DI0_PIN17 (_MX53_PAD_EIM_D21__IPU_DI0_PIN17 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK (_MX53_PAD_EIM_D21__IPU_DISPB0_SER_CLK | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D21__CSPI_SCLK (_MX53_PAD_EIM_D21__CSPI_SCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_EIM_D21__I2C1_SCL (_MX53_PAD_EIM_D21__I2C1_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_EIM_D21__USBOH3_USBOTG_OC (_MX53_PAD_EIM_D21__USBOH3_USBOTG_OC | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D22__EMI_WEIM_D_22 (_MX53_PAD_EIM_D22__EMI_WEIM_D_22 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D22__GPIO3_22 (_MX53_PAD_EIM_D22__GPIO3_22 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -1732,7 +1735,7 @@ #define MX53_PAD_EIM_D28__UART2_CTS (_MX53_PAD_EIM_D28__UART2_CTS | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO (_MX53_PAD_EIM_D28__IPU_DISPB0_SER_DIO | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D28__CSPI_MOSI (_MX53_PAD_EIM_D28__CSPI_MOSI | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_EIM_D28__I2C1_SDA (_MX53_PAD_EIM_D28__I2C1_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_EIM_D28__IPU_EXT_TRIG (_MX53_PAD_EIM_D28__IPU_EXT_TRIG | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D28__IPU_DI0_PIN13 (_MX53_PAD_EIM_D28__IPU_DI0_PIN13 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_EIM_D29__EMI_WEIM_D_29 (_MX53_PAD_EIM_D29__EMI_WEIM_D_29 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -2297,7 +2300,7 @@ #define MX53_PAD_GPIO_9__SCC_FAIL_STATE (_MX53_PAD_GPIO_9__SCC_FAIL_STATE | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_3__ESAI1_HCKR (_MX53_PAD_GPIO_3__ESAI1_HCKR | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_3__GPIO1_3 (_MX53_PAD_GPIO_3__GPIO1_3 | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_GPIO_3__I2C3_SCL (_MX53_PAD_GPIO_3__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_GPIO_3__DPLLIP1_TOG_EN (_MX53_PAD_GPIO_3__DPLLIP1_TOG_EN | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_3__CCM_CLKO2 (_MX53_PAD_GPIO_3__CCM_CLKO2 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 (_MX53_PAD_GPIO_3__OBSERVE_MUX_OBSRV_INT_OUT0 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -2305,7 +2308,7 @@ #define MX53_PAD_GPIO_3__MLB_MLBCLK (_MX53_PAD_GPIO_3__MLB_MLBCLK | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_6__ESAI1_SCKT (_MX53_PAD_GPIO_6__ESAI1_SCKT | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_6__GPIO1_6 (_MX53_PAD_GPIO_6__GPIO1_6 | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_GPIO_6__I2C3_SDA (_MX53_PAD_GPIO_6__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_GPIO_6__CCM_CCM_OUT_0 (_MX53_PAD_GPIO_6__CCM_CCM_OUT_0 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_6__CSU_CSU_INT_DEB (_MX53_PAD_GPIO_6__CSU_CSU_INT_DEB | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 (_MX53_PAD_GPIO_6__OBSERVE_MUX_OBSRV_INT_OUT1 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -2333,7 +2336,7 @@ #define MX53_PAD_GPIO_5__CCM_CLKO (_MX53_PAD_GPIO_5__CCM_CLKO | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 (_MX53_PAD_GPIO_5__CSU_CSU_ALARM_AUT_2 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 
(_MX53_PAD_GPIO_5__OBSERVE_MUX_OBSRV_INT_OUT4 | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_GPIO_5__I2C3_SCL (_MX53_PAD_GPIO_5__I2C3_SCL | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_GPIO_5__CCM_PLL1_BYP (_MX53_PAD_GPIO_5__CCM_PLL1_BYP | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_7__ESAI1_TX4_RX1 (_MX53_PAD_GPIO_7__ESAI1_TX4_RX1 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_7__GPIO1_7 (_MX53_PAD_GPIO_7__GPIO1_7 | MUX_PAD_CTRL(NO_PAD_CTRL)) @@ -2356,7 +2359,7 @@ #define MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT (_MX53_PAD_GPIO_16__TZIC_PWRFAIL_INT | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 (_MX53_PAD_GPIO_16__RTC_CE_RTC_EXT_TRIG1 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_16__SPDIF_IN1 (_MX53_PAD_GPIO_16__SPDIF_IN1 | MUX_PAD_CTRL(NO_PAD_CTRL)) -#define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(NO_PAD_CTRL)) +#define MX53_PAD_GPIO_16__I2C3_SDA (_MX53_PAD_GPIO_16__I2C3_SDA | MUX_PAD_CTRL(PAD_CTRL_I2C)) #define MX53_PAD_GPIO_16__SJC_DE_B (_MX53_PAD_GPIO_16__SJC_DE_B | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_17__ESAI1_TX0 (_MX53_PAD_GPIO_17__ESAI1_TX0 | MUX_PAD_CTRL(NO_PAD_CTRL)) #define MX53_PAD_GPIO_17__GPIO7_12 (_MX53_PAD_GPIO_17__GPIO7_12 | MUX_PAD_CTRL(NO_PAD_CTRL)) -- cgit v0.10.2 From 37df0e5de7612979354593df8edec5c5f3d0b475 Mon Sep 17 00:00:00 2001 From: "Arnaud Patard (Rtp)" Date: Mon, 27 Jun 2011 22:41:04 +0200 Subject: efikasb/mx: fix usbh1 initialisation mx51_initialize_usb_hw() support introduced a small error. The usbh1 has pdev->id equal to 1 not 0, so use pdev->id to make things less error-prone and to fix that. Signed-off-by: Arnaud Patard Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-mx5/mx51_efika.c b/arch/arm/mach-mx5/mx51_efika.c index 56739c2..1105ef3 100644 --- a/arch/arm/mach-mx5/mx51_efika.c +++ b/arch/arm/mach-mx5/mx51_efika.c @@ -186,7 +186,7 @@ static int initialize_usbh1_port(struct platform_device *pdev) mdelay(10); - return mx51_initialize_usb_hw(0, MXC_EHCI_ITC_NO_THRESHOLD); + return mx51_initialize_usb_hw(pdev->id, MXC_EHCI_ITC_NO_THRESHOLD); } static struct mxc_usbh_platform_data usbh1_config = { -- cgit v0.10.2 From f7db3d5f4c27fa93304ead2d7e76d00df3981f02 Mon Sep 17 00:00:00 2001 From: "Arnaud Patard (Rtp)" Date: Mon, 27 Jun 2011 22:41:05 +0200 Subject: efikasb: fix gpio keys. While testing the keys, I only checked that the key were generating an input event but unfortunately, they were not generating the right event. Fix that. Signed-off-by: Arnaud Patard Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c index 474fc6e..8054f3f 100644 --- a/arch/arm/mach-mx5/board-mx51_efikasb.c +++ b/arch/arm/mach-mx5/board-mx51_efikasb.c @@ -163,23 +163,24 @@ static struct gpio_keys_button mx51_efikasb_keys[] = { { .code = KEY_POWER, .gpio = EFIKASB_PWRKEY, - .type = EV_PWR, + .type = EV_KEY, .desc = "Power Button", .wakeup = 1, - .debounce_interval = 10, /* ms */ + .active_low = 1, }, { .code = SW_LID, .gpio = EFIKASB_LID, .type = EV_SW, .desc = "Lid Switch", + .active_low = 1, }, { - /* SW_RFKILLALL vs KEY_RFKILL ? 
*/ - .code = SW_RFKILL_ALL, + .code = KEY_RFKILL, .gpio = EFIKASB_RFKILL, - .type = EV_SW, + .type = EV_KEY, .desc = "rfkill", + .active_low = 1, }, }; -- cgit v0.10.2 From fbd60a7e87213f702889be21ac14c59fa62defe9 Mon Sep 17 00:00:00 2001 From: "Arnaud Patard (Rtp)" Date: Mon, 27 Jun 2011 22:41:06 +0200 Subject: efika: Fix board id detection Current code is assuming that gpio_get_value() is returning 0 or 1 but it should be checking if the value is 0 or not. Not doing it properly is breaking the detection of the board (and thus the reboot of efika mx to2) when using the new mxc gpio driver, which relies on basic mmio gpio. Signed-off-by: Arnaud Patard Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c index 6e36231..5e8dd6a 100644 --- a/arch/arm/mach-mx5/board-mx51_efikamx.c +++ b/arch/arm/mach-mx5/board-mx51_efikamx.c @@ -108,9 +108,9 @@ static void __init mx51_efikamx_board_id(void) gpio_request(EFIKAMX_PCBID2, "pcbid2"); gpio_direction_input(EFIKAMX_PCBID2); - id = gpio_get_value(EFIKAMX_PCBID0); - id |= gpio_get_value(EFIKAMX_PCBID1) << 1; - id |= gpio_get_value(EFIKAMX_PCBID2) << 2; + id = gpio_get_value(EFIKAMX_PCBID0) ? 1 : 0; + id |= (gpio_get_value(EFIKAMX_PCBID1) ? 1 : 0) << 1; + id |= (gpio_get_value(EFIKAMX_PCBID2) ? 1 : 0) << 2; switch (id) { case 7: diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c index 8054f3f..f0c33b3 100644 --- a/arch/arm/mach-mx5/board-mx51_efikasb.c +++ b/arch/arm/mach-mx5/board-mx51_efikasb.c @@ -232,8 +232,8 @@ static void __init mx51_efikasb_board_id(void) gpio_request(EFIKASB_PCBID1, "pcb id1"); gpio_direction_input(EFIKASB_PCBID1); - id = gpio_get_value(EFIKASB_PCBID0); - id |= gpio_get_value(EFIKASB_PCBID1) << 1; + id = gpio_get_value(EFIKASB_PCBID0) ? 1 : 0; + id |= (gpio_get_value(EFIKASB_PCBID1) ? 1 : 0) << 1; switch (id) { default: -- cgit v0.10.2 From 1c4b45d8f6bda8e457f674f3a67aa9b2e57830c7 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 25 Jul 2011 17:46:31 -0300 Subject: ARM: board-mx51_babbage: Fix UART2 registration On MX51 Babbage board the RTS/CTS pins are not used on UART2 port. Fix the registration of this port. 
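Passing NULL platform data is the idiomatic way to say "no hardware flow control" here: the imx serial driver only enables RTS/CTS handshaking when the platform data carries IMXUART_HAVE_RTSCTS. A short sketch of the two registration styles, assuming the usual imxuart platform data layout; example_uart_init() is a placeholder name:

        #include <linux/init.h>
        #include <mach/imx-uart.h>

        static const struct imxuart_platform_data uart_pdata __initconst = {
                .flags = IMXUART_HAVE_RTSCTS,   /* RTS/CTS pins are wired */
        };

        static void __init example_uart_init(void)
        {
                imx51_add_imx_uart(0, &uart_pdata);     /* flow control wired */
                imx51_add_imx_uart(1, NULL);            /* no RTS/CTS pins */
                imx51_add_imx_uart(2, &uart_pdata);
        }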
Signed-off-by: Fabio Estevam Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c index c7b3fab..98b40aa 100644 --- a/arch/arm/mach-mx5/board-mx51_babbage.c +++ b/arch/arm/mach-mx5/board-mx51_babbage.c @@ -347,7 +347,7 @@ static void __init mx51_babbage_init(void) ARRAY_SIZE(mx51babbage_pads)); imx51_add_imx_uart(0, &uart_pdata); - imx51_add_imx_uart(1, &uart_pdata); + imx51_add_imx_uart(1, NULL); imx51_add_imx_uart(2, &uart_pdata); babbage_fec_reset(); -- cgit v0.10.2 From 1dd75f91ae713049eb6baaa640078f3a6549e522 Mon Sep 17 00:00:00 2001 From: "jhbird.choi@samsung.com" Date: Thu, 21 Jul 2011 15:29:14 +0900 Subject: genirq: Fix wrong bit operation (!msk & 0x01) should be !(msk & 0x01) Signed-off-by: Jonghwan Choi Link: http://lkml.kernel.org/r/1311229754-6003-1-git-send-email-jhbird.choi@samsung.com Signed-off-by: Thomas Gleixner Cc: stable@kernel.org diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index 3a2cab4..e38544d 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c @@ -246,7 +246,7 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); for (i = gc->irq_base; msk; msk >>= 1, i++) { - if (!msk & 0x01) + if (!(msk & 0x01)) continue; if (flags & IRQ_GC_INIT_NESTED_LOCK) @@ -301,7 +301,7 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, raw_spin_unlock(&gc_lock); for (; msk; msk >>= 1, i++) { - if (!msk & 0x01) + if (!(msk & 0x01)) continue; /* Remove handler first. That will mask the irq line */ -- cgit v0.10.2 From 53cc2820acbdbcc768675bfaff321f3a8680a317 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Jul 2011 09:12:50 +0000 Subject: rtc: Handle errors correctly in rtc_irq_set_state() In rtc_irq_set_state, the code checks the correctness of the parameters, but then goes on to unconditionally arms/disarms the hrtimer. Thus a random task might arm/disarm rtc timer and surprise the real owner by either generating events or by stopping them. Cc: stable@kernel.org Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index df68618..b6bf57f 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -656,6 +656,8 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled err = -EBUSY; if (rtc->irq_task != task) err = -EACCES; + if (err) + goto out; if (enabled) { ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); @@ -664,6 +666,7 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled hrtimer_cancel(&rtc->pie_timer); } rtc->pie_enabled = enabled; +out: spin_unlock_irqrestore(&rtc->irq_task_lock, flags); return err; -- cgit v0.10.2 From 3c8bb90efb6e3105206e4aaa9127395feeda5492 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Jul 2011 09:12:51 +0000 Subject: rtc: Fix hrtimer deadlock Ben reported a lockup related to rtc. The lockup happens due to: CPU0 CPU1 rtc_irq_set_state() __run_hrtimer() spin_lock_irqsave(&rtc->irq_task_lock) rtc_handle_legacy_irq(); spin_lock(&rtc->irq_task_lock); hrtimer_cancel() while (callback_running); So the running callback never finishes as it's blocked on rtc->irq_task_lock. Use hrtimer_try_to_cancel() instead and drop rtc->irq_task_lock while waiting for the callback. Fix this for both rtc_irq_set_state() and rtc_irq_set_freq(). 
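Distilled from the fix below, the general idiom for cancelling a self-rearming hrtimer whose callback takes the lock you are holding looks like this; lock, timer and rearm_timer() are placeholder names, not the rtc code itself:

        #include <linux/hrtimer.h>
        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(lock);   /* also taken by the timer callback */
        static struct hrtimer timer;    /* self-rearming: HRTIMER_RESTART */

        static void rearm_timer(ktime_t period)
        {
                unsigned long flags;

        retry:
                spin_lock_irqsave(&lock, flags);
                /*
                 * hrtimer_cancel() would spin on the running callback,
                 * which is itself blocked on "lock": a deadlock.  Try
                 * instead, back off with the lock dropped, and retry
                 * once the callback has finished.
                 */
                if (hrtimer_try_to_cancel(&timer) < 0) {
                        spin_unlock_irqrestore(&lock, flags);
                        cpu_relax();
                        goto retry;
                }
                /* timer is now guaranteed inactive; safe to reprogram */
                hrtimer_start(&timer, period, HRTIMER_MODE_REL);
                spin_unlock_irqrestore(&lock, flags);
        }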
Cc: stable@kernel.org Reported-by: Ben Greear Signed-off-by: Thomas Gleixner Signed-off-by: John Stultz diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index b6bf57f..a1ba2ca 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -636,6 +636,29 @@ void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task) } EXPORT_SYMBOL_GPL(rtc_irq_unregister); +static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled) +{ + /* + * We always cancel the timer here first, because otherwise + * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); + * when we manage to start the timer before the callback + * returns HRTIMER_RESTART. + * + * We cannot use hrtimer_cancel() here as a running callback + * could be blocked on rtc->irq_task_lock and hrtimer_cancel() + * would spin forever. + */ + if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0) + return -1; + + if (enabled) { + ktime_t period = ktime_set(0, NSEC_PER_SEC / rtc->irq_freq); + + hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); + } + return 0; +} + /** * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs * @rtc: the rtc device @@ -651,24 +674,21 @@ int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled int err = 0; unsigned long flags; +retry: spin_lock_irqsave(&rtc->irq_task_lock, flags); if (rtc->irq_task != NULL && task == NULL) err = -EBUSY; if (rtc->irq_task != task) err = -EACCES; - if (err) - goto out; - - if (enabled) { - ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); - hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL); - } else { - hrtimer_cancel(&rtc->pie_timer); + if (!err) { + if (rtc_update_hrtimer(rtc, enabled) < 0) { + spin_unlock_irqrestore(&rtc->irq_task_lock, flags); + cpu_relax(); + goto retry; + } + rtc->pie_enabled = enabled; } - rtc->pie_enabled = enabled; -out: spin_unlock_irqrestore(&rtc->irq_task_lock, flags); - return err; } EXPORT_SYMBOL_GPL(rtc_irq_set_state); @@ -690,20 +710,18 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) if (freq <= 0) return -EINVAL; - +retry: spin_lock_irqsave(&rtc->irq_task_lock, flags); if (rtc->irq_task != NULL && task == NULL) err = -EBUSY; if (rtc->irq_task != task) err = -EACCES; - if (err == 0) { + if (!err) { rtc->irq_freq = freq; - if (rtc->pie_enabled) { - ktime_t period; - hrtimer_cancel(&rtc->pie_timer); - period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq); - hrtimer_start(&rtc->pie_timer, period, - HRTIMER_MODE_REL); + if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) { + spin_unlock_irqrestore(&rtc->irq_task_lock, flags); + cpu_relax(); + goto retry; } } spin_unlock_irqrestore(&rtc->irq_task_lock, flags); -- cgit v0.10.2 From 6e7a333eaa522ef73be01caec7a01521490aaf00 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 22 Jul 2011 09:12:51 +0000 Subject: rtc: Limit RTC PIE frequency The RTC pie hrtimer is self rearming. We really need to limit the frequency to something sensible. 
Thus limit it to the 8192Hz max value from the rtc man documentation Cc: Willy Tarreau Cc: stable@kernel.org Signed-off-by: Thomas Gleixner [jstultz: slightly reworked to use RTC_MAX_FREQ value] Signed-off-by: John Stultz diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a1ba2ca..44e91e5 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -708,7 +708,7 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq) int err = 0; unsigned long flags; - if (freq <= 0) + if (freq <= 0 || freq > RTC_MAX_FREQ) return -EINVAL; retry: spin_lock_irqsave(&rtc->irq_task_lock, flags); diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b27ebea..93f4d03 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -97,6 +97,9 @@ struct rtc_pll_info { #define RTC_AF 0x20 /* Alarm interrupt */ #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ + +#define RTC_MAX_FREQ 8192 + #ifdef __KERNEL__ #include -- cgit v0.10.2 From fd079facb3fdd1b0517f0b2087ac05c30ea09cfe Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 25 Jul 2011 11:01:09 -0700 Subject: KVM: fix TASK_DELAY_ACCT kconfig warning Fix kconfig dependency warning: warning: (KVM) selects TASK_DELAY_ACCT which has unmet direct dependencies (TASKSTATS) Signed-off-by: Randy Dunlap Signed-off-by: Avi Kivity diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 988724b..0a09b58 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -31,6 +31,7 @@ config KVM select KVM_ASYNC_PF select USER_RETURN_NOTIFIER select KVM_MMIO + select TASKSTATS select TASK_DELAY_ACCT ---help--- Support hosting fully virtualized guest machines using hardware -- cgit v0.10.2 From f3637a5f2e2eb391ff5757bc83fb5de8f9726464 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 7 Jul 2011 22:32:17 +0200 Subject: irq: Always set IRQF_ONESHOT if no primary handler is specified If no primary handler is specified then a default one is assigned which always returns IRQ_WAKE_THREAD. This handler requires the IRQF_ONESHOT flag on LEVEL / EIO typed irqs because the source of interrupt is not disabled. Since it is required for those users and there is no difference for others it makes sense to add this flag unconditionally. Signed-off-by: Sebastian Andrzej Siewior Link: http://lkml.kernel.org/r/1310070737-18514-1-git-send-email-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0a7840ae..3f9cd47 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1322,6 +1322,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, if (!thread_fn) return -EINVAL; handler = irq_default_primary_handler; + irqflags |= IRQF_ONESHOT; } action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); -- cgit v0.10.2 From b6873807a7143b7d6d8b06809295e559d07d7deb Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 11 Jul 2011 12:17:31 +0200 Subject: irq: Track the owner of irq descriptor Interrupt descriptors can be allocated from modules. The interrupts are used by other modules, but we have no refcount on the module which provides the interrupts and there is no way to establish one on the device level as the interrupt using module is agnostic to the fact that the interrupt is provided by a module rather than by some builtin interrupt controller. To prevent removal of the interrupt providing module, we can track the owner of the interrupt descriptor, which also provides the relevant irq chip functions in the irq descriptor. 
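The underlying idiom is standard module pinning: whoever hands out a module-provided resource takes a reference on that module and drops it on release. A minimal sketch with generic names (resource_desc and friends are illustrative, not the irq code itself):

        #include <linux/errno.h>
        #include <linux/module.h>

        struct resource_desc {
                struct module *owner;   /* module providing the resource */
        };

        static int resource_acquire(struct resource_desc *d)
        {
                /*
                 * Fails once the owner has started unloading.  Note that
                 * try_module_get(NULL) succeeds, so built-in providers
                 * need no special casing.
                 */
                if (!try_module_get(d->owner))
                        return -ENODEV;
                return 0;
        }

        static void resource_release(struct resource_desc *d)
        {
                module_put(d->owner);   /* owner may now be unloaded */
        }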
request/setup_irq() can now acquire a refcount on the owner module to prevent unloading. free_irq() drops the refcount. Signed-off-by: Sebastian Andrzej Siewior Link: http://lkml.kernel.org/r/20110711101731.GA13804@Chamillionaire.breakpoint.cc Signed-off-by: Thomas Gleixner diff --git a/include/linux/irq.h b/include/linux/irq.h index baa397e..16d6f54 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -546,7 +547,15 @@ static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) return d->msi_desc; } -int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); +int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, + struct module *owner); + +static inline int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, + int node) +{ + return __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE); +} + void irq_free_descs(unsigned int irq, unsigned int cnt); int irq_reserve_irqs(unsigned int from, unsigned int cnt); diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 2d921b3..150134a 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -66,6 +66,7 @@ struct irq_desc { #ifdef CONFIG_PROC_FS struct proc_dir_entry *dir; #endif + struct module *owner; const char *name; } ____cacheline_internodealigned_in_smp; diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4c60a50..cb65d03 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -70,7 +70,8 @@ static inline void desc_smp_init(struct irq_desc *desc, int node) { } static inline int desc_node(struct irq_desc *desc) { return 0; } #endif -static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) +static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, + struct module *owner) { int cpu; @@ -86,6 +87,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) desc->irq_count = 0; desc->irqs_unhandled = 0; desc->name = NULL; + desc->owner = owner; for_each_possible_cpu(cpu) *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; desc_smp_init(desc, node); @@ -128,7 +130,7 @@ static void free_masks(struct irq_desc *desc) static inline void free_masks(struct irq_desc *desc) { } #endif -static struct irq_desc *alloc_desc(int irq, int node) +static struct irq_desc *alloc_desc(int irq, int node, struct module *owner) { struct irq_desc *desc; gfp_t gfp = GFP_KERNEL; @@ -147,7 +149,7 @@ static struct irq_desc *alloc_desc(int irq, int node) raw_spin_lock_init(&desc->lock); lockdep_set_class(&desc->lock, &irq_desc_lock_class); - desc_set_defaults(irq, desc, node); + desc_set_defaults(irq, desc, node, owner); return desc; @@ -173,13 +175,14 @@ static void free_desc(unsigned int irq) kfree(desc); } -static int alloc_descs(unsigned int start, unsigned int cnt, int node) +static int alloc_descs(unsigned int start, unsigned int cnt, int node, + struct module *owner) { struct irq_desc *desc; int i; for (i = 0; i < cnt; i++) { - desc = alloc_desc(start + i, node); + desc = alloc_desc(start + i, node, owner); if (!desc) goto err; mutex_lock(&sparse_irq_lock); @@ -227,7 +230,7 @@ int __init early_irq_init(void) nr_irqs = initcnt; for (i = 0; i < initcnt; i++) { - desc = alloc_desc(i, node); + desc = alloc_desc(i, node, NULL); set_bit(i, allocated_irqs); irq_insert_desc(i, desc); } @@ -261,7 +264,7 @@ int __init early_irq_init(void) alloc_masks(&desc[i], GFP_KERNEL, node); raw_spin_lock_init(&desc[i].lock); 
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); - desc_set_defaults(i, &desc[i], node); + desc_set_defaults(i, &desc[i], node, NULL); } return arch_early_irq_init(); } @@ -276,8 +279,16 @@ static void free_desc(unsigned int irq) dynamic_irq_cleanup(irq); } -static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) +static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, + struct module *owner) { + u32 i; + + for (i = 0; i < cnt; i++) { + struct irq_desc *desc = irq_to_desc(start + i); + + desc->owner = owner; + } return start; } @@ -337,7 +348,8 @@ EXPORT_SYMBOL_GPL(irq_free_descs); * Returns the first irq number or error code */ int __ref -irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) +__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, + struct module *owner) { int start, ret; @@ -366,13 +378,13 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) bitmap_set(allocated_irqs, start, cnt); mutex_unlock(&sparse_irq_lock); - return alloc_descs(start, cnt, node); + return alloc_descs(start, cnt, node, owner); err: mutex_unlock(&sparse_irq_lock); return ret; } -EXPORT_SYMBOL_GPL(irq_alloc_descs); +EXPORT_SYMBOL_GPL(__irq_alloc_descs); /** * irq_reserve_irqs - mark irqs allocated @@ -440,7 +452,7 @@ void dynamic_irq_cleanup(unsigned int irq) unsigned long flags; raw_spin_lock_irqsave(&desc->lock, flags); - desc_set_defaults(irq, desc, desc_node(desc)); + desc_set_defaults(irq, desc, desc_node(desc), NULL); raw_spin_unlock_irqrestore(&desc->lock, flags); } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 3f9cd47..2e94258 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -883,6 +883,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) if (desc->irq_data.chip == &no_irq_chip) return -ENOSYS; + if (!try_module_get(desc->owner)) + return -ENODEV; /* * Some drivers like serial.c use request_irq() heavily, * so we have to be careful not to interfere with a @@ -906,8 +908,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ nested = irq_settings_is_nested_thread(desc); if (nested) { - if (!new->thread_fn) - return -EINVAL; + if (!new->thread_fn) { + ret = -EINVAL; + goto out_mput; + } /* * Replace the primary handler which was provided from * the driver for non nested interrupt handling by the @@ -929,8 +933,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) t = kthread_create(irq_thread, new, "irq/%d-%s", irq, new->name); - if (IS_ERR(t)) - return PTR_ERR(t); + if (IS_ERR(t)) { + ret = PTR_ERR(t); + goto out_mput; + } /* * We keep the reference to the task struct even if * the thread dies to avoid that the interrupt code @@ -1095,6 +1101,8 @@ out_thread: kthread_stop(t); put_task_struct(t); } +out_mput: + module_put(desc->owner); return ret; } @@ -1203,6 +1211,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) put_task_struct(action->thread); } + module_put(desc->owner); return action; } -- cgit v0.10.2 From f9925d4400927fcf3e25cd371442e47d40b37536 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 28 Jul 2011 12:44:44 +0100 Subject: ASoC: Disable wm_hubs periodic DC servo update This does not function correctly in all circumstances so disable the periodic updates unconditionally for stable; a future patch will reenable where appropriate. 
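snd_soc_update_bits() is ASoC's read-modify-write helper: only the bits selected by the mask change, so writing 0 into WM8993_DCS_TIMER_PERIOD_01_MASK leaves the rest of the DC servo register untouched. A simplified model of what the helper does, for illustration only (the real helper also reports whether the register value actually changed):

        #include <sound/soc.h>

        /* simplified model of snd_soc_update_bits() */
        static int update_bits_model(struct snd_soc_codec *codec,
                                     unsigned int reg, unsigned int mask,
                                     unsigned int value)
        {
                unsigned int old = snd_soc_read(codec, reg);
                unsigned int new = (old & ~mask) | (value & mask);

                if (new == old)
                        return 0;
                return snd_soc_write(codec, reg, new);
        }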
Signed-off-by: Mark Brown Acked-by: Liam Girdwood diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 4cc2d56..e763c54 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -440,9 +440,8 @@ static int hp_event(struct snd_soc_dapm_widget *w, reg |= WM8993_HPOUT1L_DLY | WM8993_HPOUT1R_DLY; snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg); - /* Smallest supported update interval */ snd_soc_update_bits(codec, WM8993_DC_SERVO_1, - WM8993_DCS_TIMER_PERIOD_01_MASK, 1); + WM8993_DCS_TIMER_PERIOD_01_MASK, 0); calibrate_dc_servo(codec); -- cgit v0.10.2 From aa387cc895672b00f807ad7c734a2defaf677712 Mon Sep 17 00:00:00 2001 From: Mike Christie Date: Sun, 31 Jul 2011 22:05:09 +0200 Subject: block: add bsg helper library This moves the FC classes bsg code to the block layer and makes it a lib so that other classes like iscsi and SAS can use it. It is helpful because working with the request queue, bios, creating scatterlists, etc are a pain that the LLD does not have to worry about with normal IOs and should not have to worry about for bsg requests. Signed-off-by: Mike Christie Signed-off-by: Jens Axboe diff --git a/block/Kconfig b/block/Kconfig index 60be1e0..e97934e 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -65,6 +65,16 @@ config BLK_DEV_BSG If unsure, say Y. +config BLK_DEV_BSGLIB + bool "Block layer SG support v4 helper lib" + default n + select BLK_DEV_BSG + help + Subsystems will normally enable this if needed. Users will not + normally need to manually enable this. + + If unsure, say N. + config BLK_DEV_INTEGRITY bool "Block layer data integrity support" ---help--- diff --git a/block/Makefile b/block/Makefile index 0fec4b3..514c6e4 100644 --- a/block/Makefile +++ b/block/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o obj-$(CONFIG_BLK_DEV_BSG) += bsg.o +obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o diff --git a/block/bsg-lib.c b/block/bsg-lib.c new file mode 100644 index 0000000..f8c0a61 --- /dev/null +++ b/block/bsg-lib.c @@ -0,0 +1,297 @@ +/* + * BSG helper library + * + * Copyright (C) 2008 James Smart, Emulex Corporation + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * Copyright (C) 2011 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include + +/** + * bsg_destroy_job - routine to teardown/delete a bsg job + * @job: bsg_job that is to be torn down + */ +static void bsg_destroy_job(struct bsg_job *job) +{ + put_device(job->dev); /* release reference for the request */ + + kfree(job->request_payload.sg_list); + kfree(job->reply_payload.sg_list); + kfree(job); +} + +/** + * bsg_job_done - completion routine for bsg requests + * @job: bsg_job that is complete + * @result: job reply result + * @reply_payload_rcv_len: length of payload recvd + * + * The LLD should call this when the bsg job has completed. + */ +void bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len) +{ + struct request *req = job->req; + struct request *rsp = req->next_rq; + int err; + + err = job->req->errors = result; + if (err < 0) + /* we're only returning the result field in the reply */ + job->req->sense_len = sizeof(u32); + else + job->req->sense_len = job->reply_len; + /* we assume all request payload was transferred, residual == 0 */ + req->resid_len = 0; + + if (rsp) { + WARN_ON(reply_payload_rcv_len > rsp->resid_len); + + /* set reply (bidi) residual */ + rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len); + } + blk_complete_request(req); +} +EXPORT_SYMBOL_GPL(bsg_job_done); + +/** + * bsg_softirq_done - softirq done routine for destroying the bsg requests + * @rq: BSG request that holds the job to be destroyed + */ +static void bsg_softirq_done(struct request *rq) +{ + struct bsg_job *job = rq->special; + + blk_end_request_all(rq, rq->errors); + bsg_destroy_job(job); +} + +static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) +{ + size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); + + BUG_ON(!req->nr_phys_segments); + + buf->sg_list = kzalloc(sz, GFP_KERNEL); + if (!buf->sg_list) + return -ENOMEM; + sg_init_table(buf->sg_list, req->nr_phys_segments); + buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); + buf->payload_len = blk_rq_bytes(req); + return 0; +} + +/** + * bsg_create_job - create the bsg_job structure for the bsg request + * @dev: device that is being sent the bsg request + * @req: BSG request that needs a job structure + */ +static int bsg_create_job(struct device *dev, struct request *req) +{ + struct request *rsp = req->next_rq; + struct request_queue *q = req->q; + struct bsg_job *job; + int ret; + + BUG_ON(req->special); + + job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); + if (!job) + return -ENOMEM; + + req->special = job; + job->req = req; + if (q->bsg_job_size) + job->dd_data = (void *)&job[1]; + job->request = req->cmd; + job->request_len = req->cmd_len; + job->reply = req->sense; + job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer + * allocated */ + if (req->bio) { + ret = bsg_map_buffer(&job->request_payload, req); + if (ret) + goto failjob_rls_job; + } + if (rsp && rsp->bio) { + ret = bsg_map_buffer(&job->reply_payload, rsp); + if (ret) + goto failjob_rls_rqst_payload; + } + job->dev = dev; + /* take a reference for the request */ + get_device(job->dev); + return 0; + +failjob_rls_rqst_payload: + kfree(job->request_payload.sg_list); +failjob_rls_job: + kfree(job); + return -ENOMEM; +} + +/* + * bsg_goose_queue - restart queue in case it was 
stopped + * @q: request q to be restarted + */ +void bsg_goose_queue(struct request_queue *q) +{ + if (!q) + return; + + blk_run_queue_async(q); +} +EXPORT_SYMBOL_GPL(bsg_goose_queue); + +/** + * bsg_request_fn - generic handler for bsg requests + * @q: request queue to manage + * + * On error the create_bsg_job function should return a -Exyz error value + * that will be set to the req->errors. + * + * Drivers/subsys should pass this to the queue init function. + */ +void bsg_request_fn(struct request_queue *q) +{ + struct device *dev = q->queuedata; + struct request *req; + struct bsg_job *job; + int ret; + + if (!get_device(dev)) + return; + + while (1) { + req = blk_fetch_request(q); + if (!req) + break; + spin_unlock_irq(q->queue_lock); + + ret = bsg_create_job(dev, req); + if (ret) { + req->errors = ret; + blk_end_request_all(req, ret); + spin_lock_irq(q->queue_lock); + continue; + } + + job = req->special; + ret = q->bsg_job_fn(job); + spin_lock_irq(q->queue_lock); + if (ret) + break; + } + + spin_unlock_irq(q->queue_lock); + put_device(dev); + spin_lock_irq(q->queue_lock); +} +EXPORT_SYMBOL_GPL(bsg_request_fn); + +/** + * bsg_setup_queue - Create and add the bsg hooks so we can receive requests + * @dev: device to attach bsg device to + * @q: request queue setup by caller + * @name: device to give bsg device + * @job_fn: bsg job handler + * @dd_job_size: size of LLD data needed for each job + * + * The caller should have setup the reuqest queue with bsg_request_fn + * as the request_fn. + */ +int bsg_setup_queue(struct device *dev, struct request_queue *q, + char *name, bsg_job_fn *job_fn, int dd_job_size) +{ + int ret; + + q->queuedata = dev; + q->bsg_job_size = dd_job_size; + q->bsg_job_fn = job_fn; + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); + blk_queue_softirq_done(q, bsg_softirq_done); + blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); + + ret = bsg_register_queue(q, dev, name, NULL); + if (ret) { + printk(KERN_ERR "%s: bsg interface failed to " + "initialize - register queue\n", dev->kobj.name); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(bsg_setup_queue); + +/** + * bsg_remove_queue - Deletes the bsg dev from the q + * @q: the request_queue that is to be torn down. + * + * Notes: + * Before unregistering the queue empty any requests that are blocked + */ +void bsg_remove_queue(struct request_queue *q) +{ + struct request *req; /* block request */ + int counts; /* totals for request_list count and starved */ + + if (!q) + return; + + /* Stop taking in new requests */ + spin_lock_irq(q->queue_lock); + blk_stop_queue(q); + + /* drain all requests in the queue */ + while (1) { + /* need the lock to fetch a request + * this may fetch the same reqeust as the previous pass + */ + req = blk_fetch_request(q); + /* save requests in use and starved */ + counts = q->rq.count[0] + q->rq.count[1] + + q->rq.starved[0] + q->rq.starved[1]; + spin_unlock_irq(q->queue_lock); + /* any requests still outstanding? */ + if (counts == 0) + break; + + /* This may be the same req as the previous iteration, + * always send the blk_end_request_all after a prefetch. + * It is not okay to not end the request because the + * prefetch started the request. 
+ */ + if (req) { + /* return -ENXIO to indicate that this queue is + * going away + */ + req->errors = -ENXIO; + blk_end_request_all(req, -ENXIO); + } + + msleep(200); /* allow bsg to possibly finish */ + spin_lock_irq(q->queue_lock); + } + bsg_unregister_queue(q); +} +EXPORT_SYMBOL_GPL(bsg_remove_queue); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0e67c45..8479285 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -30,6 +30,7 @@ struct request_pm_state; struct blk_trace; struct request; struct sg_io_hdr; +struct bsg_job; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -209,6 +210,7 @@ typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, typedef void (softirq_done_fn)(struct request *); typedef int (dma_drain_needed_fn)(struct request *); typedef int (lld_busy_fn) (struct request_queue *q); +typedef int (bsg_job_fn) (struct bsg_job *); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED, @@ -375,6 +377,8 @@ struct request_queue { struct mutex sysfs_lock; #if defined(CONFIG_BLK_DEV_BSG) + bsg_job_fn *bsg_job_fn; + int bsg_job_size; struct bsg_class_device bsg_dev; #endif diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h new file mode 100644 index 0000000..f55ab8c --- /dev/null +++ b/include/linux/bsg-lib.h @@ -0,0 +1,73 @@ +/* + * BSG helper library + * + * Copyright (C) 2008 James Smart, Emulex Corporation + * Copyright (C) 2011 Red Hat, Inc. All rights reserved. + * Copyright (C) 2011 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#ifndef _BLK_BSG_ +#define _BLK_BSG_ + +#include + +struct request; +struct device; +struct scatterlist; +struct request_queue; + +struct bsg_buffer { + unsigned int payload_len; + int sg_cnt; + struct scatterlist *sg_list; +}; + +struct bsg_job { + struct device *dev; + struct request *req; + + /* Transport/driver specific request/reply structs */ + void *request; + void *reply; + + unsigned int request_len; + unsigned int reply_len; + /* + * On entry : reply_len indicates the buffer size allocated for + * the reply. + * + * Upon completion : the message handler must set reply_len + * to indicates the size of the reply to be returned to the + * caller. 
+ */ + + /* DMA payloads for the request/response */ + struct bsg_buffer request_payload; + struct bsg_buffer reply_payload; + + void *dd_data; /* Used for driver-specific storage */ +}; + +void bsg_job_done(struct bsg_job *job, int result, + unsigned int reply_payload_rcv_len); +int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name, + bsg_job_fn *job_fn, int dd_job_size); +void bsg_request_fn(struct request_queue *q); +void bsg_remove_queue(struct request_queue *q); +void bsg_goose_queue(struct request_queue *q); + +#endif -- cgit v0.10.2 From 34dd82afd27da2537199d7f71f1542501c6f96e7 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Sun, 31 Jul 2011 22:08:04 +0200 Subject: loop: replace linked list of allocated devices with an idr index Replace the linked list, that keeps track of allocated devices, with an idr index to allow a more efficient lookup of devices. Cc: Tejun Heo Signed-off-by: Kay Sievers Signed-off-by: Jens Axboe diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 76c8da7..f58532e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -78,8 +78,8 @@ #include -static LIST_HEAD(loop_devices); -static DEFINE_MUTEX(loop_devices_mutex); +static DEFINE_IDR(loop_index_idr); +static DEFINE_MUTEX(loop_index_mutex); static int max_part; static int part_shift; @@ -722,17 +722,10 @@ static inline int is_loop_device(struct file *file) static ssize_t loop_attr_show(struct device *dev, char *page, ssize_t (*callback)(struct loop_device *, char *)) { - struct loop_device *l, *lo = NULL; - - mutex_lock(&loop_devices_mutex); - list_for_each_entry(l, &loop_devices, lo_list) - if (disk_to_dev(l->lo_disk) == dev) { - lo = l; - break; - } - mutex_unlock(&loop_devices_mutex); + struct gendisk *disk = dev_to_disk(dev); + struct loop_device *lo = disk->private_data; - return lo ? 
callback(lo, page) : -EIO; + return callback(lo, page); } #define LOOP_ATTR_RO(_name) \ @@ -1557,40 +1550,64 @@ int loop_register_transfer(struct loop_func_table *funcs) return 0; } +static int unregister_transfer_cb(int id, void *ptr, void *data) +{ + struct loop_device *lo = ptr; + struct loop_func_table *xfer = data; + + mutex_lock(&lo->lo_ctl_mutex); + if (lo->lo_encryption == xfer) + loop_release_xfer(lo); + mutex_unlock(&lo->lo_ctl_mutex); + return 0; +} + int loop_unregister_transfer(int number) { unsigned int n = number; - struct loop_device *lo; struct loop_func_table *xfer; if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL) return -EINVAL; xfer_funcs[n] = NULL; - - list_for_each_entry(lo, &loop_devices, lo_list) { - mutex_lock(&lo->lo_ctl_mutex); - - if (lo->lo_encryption == xfer) - loop_release_xfer(lo); - - mutex_unlock(&lo->lo_ctl_mutex); - } - + idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer); return 0; } EXPORT_SYMBOL(loop_register_transfer); EXPORT_SYMBOL(loop_unregister_transfer); -static struct loop_device *loop_alloc(int i) +static int loop_add(struct loop_device **l, int i) { struct loop_device *lo; struct gendisk *disk; + int err; lo = kzalloc(sizeof(*lo), GFP_KERNEL); - if (!lo) + if (!lo) { + err = -ENOMEM; goto out; + } + + err = idr_pre_get(&loop_index_idr, GFP_KERNEL); + if (err < 0) + goto out_free_dev; + + if (i >= 0) { + int m; + + /* create specific i in the index */ + err = idr_get_new_above(&loop_index_idr, lo, i, &m); + if (err >= 0 && i != m) { + idr_remove(&loop_index_idr, m); + err = -EEXIST; + } + } else { + err = -EINVAL; + } + if (err < 0) + goto out_free_dev; lo->lo_queue = blk_alloc_queue(GFP_KERNEL); if (!lo->lo_queue) @@ -1611,56 +1628,54 @@ static struct loop_device *loop_alloc(int i) disk->private_data = lo; disk->queue = lo->lo_queue; sprintf(disk->disk_name, "loop%d", i); - return lo; + add_disk(disk); + *l = lo; + return lo->lo_number; out_free_queue: blk_cleanup_queue(lo->lo_queue); out_free_dev: kfree(lo); out: - return NULL; + return err; } -static void loop_free(struct loop_device *lo) +static void loop_remove(struct loop_device *lo) { + del_gendisk(lo->lo_disk); blk_cleanup_queue(lo->lo_queue); put_disk(lo->lo_disk); - list_del(&lo->lo_list); kfree(lo); } -static struct loop_device *loop_init_one(int i) +static int loop_lookup(struct loop_device **l, int i) { struct loop_device *lo; + int ret = -ENODEV; - list_for_each_entry(lo, &loop_devices, lo_list) { - if (lo->lo_number == i) - return lo; - } - - lo = loop_alloc(i); + lo = idr_find(&loop_index_idr, i); if (lo) { - add_disk(lo->lo_disk); - list_add_tail(&lo->lo_list, &loop_devices); + *l = lo; + ret = lo->lo_number; } - return lo; -} - -static void loop_del_one(struct loop_device *lo) -{ - del_gendisk(lo->lo_disk); - loop_free(lo); + return ret; } static struct kobject *loop_probe(dev_t dev, int *part, void *data) { struct loop_device *lo; struct kobject *kobj; + int err; - mutex_lock(&loop_devices_mutex); - lo = loop_init_one(MINOR(dev) >> part_shift); - kobj = lo ? 
get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); - mutex_unlock(&loop_devices_mutex); + mutex_lock(&loop_index_mutex); + err = loop_lookup(&lo, MINOR(dev) >> part_shift); + if (err < 0) + err = loop_add(&lo, MINOR(dev) >> part_shift); + if (err < 0) + kobj = ERR_PTR(err); + else + kobj = get_disk(lo->lo_disk); + mutex_unlock(&loop_index_mutex); *part = 0; return kobj; @@ -1670,7 +1685,7 @@ static int __init loop_init(void) { int i, nr; unsigned long range; - struct loop_device *lo, *next; + struct loop_device *lo; /* * loop module now has a feature to instantiate underlying device @@ -1719,43 +1734,36 @@ static int __init loop_init(void) if (register_blkdev(LOOP_MAJOR, "loop")) return -EIO; - for (i = 0; i < nr; i++) { - lo = loop_alloc(i); - if (!lo) - goto Enomem; - list_add_tail(&lo->lo_list, &loop_devices); - } - - /* point of no return */ - - list_for_each_entry(lo, &loop_devices, lo_list) - add_disk(lo->lo_disk); - blk_register_region(MKDEV(LOOP_MAJOR, 0), range, THIS_MODULE, loop_probe, NULL, NULL); + /* pre-create number devices of devices given by config or max_loop */ + mutex_lock(&loop_index_mutex); + for (i = 0; i < nr; i++) + loop_add(&lo, i); + mutex_unlock(&loop_index_mutex); + printk(KERN_INFO "loop: module loaded\n"); return 0; +} -Enomem: - printk(KERN_INFO "loop: out of memory\n"); - - list_for_each_entry_safe(lo, next, &loop_devices, lo_list) - loop_free(lo); +static int loop_exit_cb(int id, void *ptr, void *data) +{ + struct loop_device *lo = ptr; - unregister_blkdev(LOOP_MAJOR, "loop"); - return -ENOMEM; + loop_remove(lo); + return 0; } static void __exit loop_exit(void) { unsigned long range; - struct loop_device *lo, *next; range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; - list_for_each_entry_safe(lo, next, &loop_devices, lo_list) - loop_del_one(lo); + idr_for_each(&loop_index_idr, &loop_exit_cb, NULL); + idr_remove_all(&loop_index_idr); + idr_destroy(&loop_index_idr); blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); unregister_blkdev(LOOP_MAJOR, "loop"); diff --git a/include/linux/loop.h b/include/linux/loop.h index 66c194e..5f08d18 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -64,7 +64,6 @@ struct loop_device { struct request_queue *lo_queue; struct gendisk *lo_disk; - struct list_head lo_list; }; #endif /* __KERNEL__ */ -- cgit v0.10.2 From 770fe30a46a12b6fb6b63fbe1737654d28e84844 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Sun, 31 Jul 2011 22:08:04 +0200 Subject: loop: add management interface for on-demand device allocation Loop devices today have a fixed, pre-allocated number, usually 8. The number can only be changed at module init time. To find a free device to use, /dev/loop%i needs to be scanned, and all devices need to be opened until a free one is possibly found. This adds a new /dev/loop-control device node that makes it possible to dynamically find or allocate a free device, and to add and remove loop devices from the running system: LOOP_CTL_ADD adds a specific device. Arg is the number of the device. It returns the device number or a negative error code. LOOP_CTL_REMOVE removes a specific device. Arg is the number of the device. It returns the device number or a negative error code. LOOP_CTL_GET_FREE finds the next unbound device or allocates a new one. No arg is given. It returns the device number or a negative error code. The loop kernel module gets automatically loaded when /dev/loop-control is accessed the first time. The alias specified in the module instructs udev to create this 'dead' device node, even when the module is not loaded.
Example: cfd = open("/dev/loop-control", O_RDWR); # add a new specific loop device err = ioctl(cfd, LOOP_CTL_ADD, devnr); # remove a specific loop device err = ioctl(cfd, LOOP_CTL_REMOVE, devnr); # find or allocate a free loop device to use devnr = ioctl(cfd, LOOP_CTL_GET_FREE); sprintf(loopname, "/dev/loop%i", devnr); ffd = open("backing-file", O_RDWR); lfd = open(loopname, O_RDWR); err = ioctl(lfd, LOOP_SET_FD, ffd); Cc: Tejun Heo Cc: Karel Zak Signed-off-by: Kay Sievers Signed-off-by: Jens Axboe diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f58532e..5c9edf9 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -75,7 +75,7 @@ #include #include #include - +#include #include static DEFINE_IDR(loop_index_idr); @@ -1478,13 +1478,22 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, static int lo_open(struct block_device *bdev, fmode_t mode) { - struct loop_device *lo = bdev->bd_disk->private_data; + struct loop_device *lo; + int err = 0; + + mutex_lock(&loop_index_mutex); + lo = bdev->bd_disk->private_data; + if (!lo) { + err = -ENXIO; + goto out; + } mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); - - return 0; +out: + mutex_unlock(&loop_index_mutex); + return err; } static int lo_release(struct gendisk *disk, fmode_t mode) @@ -1603,6 +1612,13 @@ static int loop_add(struct loop_device **l, int i) idr_remove(&loop_index_idr, m); err = -EEXIST; } + } else if (i == -1) { + int m; + + /* get next free nr */ + err = idr_get_new(&loop_index_idr, lo, &m); + if (err >= 0) + i = m; } else { err = -EINVAL; } @@ -1648,16 +1664,41 @@ static void loop_remove(struct loop_device *lo) kfree(lo); } +static int find_free_cb(int id, void *ptr, void *data) +{ + struct loop_device *lo = ptr; + struct loop_device **l = data; + + if (lo->lo_state == Lo_unbound) { + *l = lo; + return 1; + } + return 0; +} + static int loop_lookup(struct loop_device **l, int i) { struct loop_device *lo; int ret = -ENODEV; + if (i < 0) { + int err; + + err = idr_for_each(&loop_index_idr, &find_free_cb, &lo); + if (err == 1) { + *l = lo; + ret = lo->lo_number; + } + goto out; + } + + /* lookup and return a specific i */ lo = idr_find(&loop_index_idr, i); if (lo) { *l = lo; ret = lo->lo_number; } +out: return ret; } @@ -1681,11 +1722,76 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) return kobj; } +static long loop_control_ioctl(struct file *file, unsigned int cmd, + unsigned long parm) +{ + struct loop_device *lo; + int ret = -ENOSYS; + + mutex_lock(&loop_index_mutex); + switch (cmd) { + case LOOP_CTL_ADD: + ret = loop_lookup(&lo, parm); + if (ret >= 0) { + ret = -EEXIST; + break; + } + ret = loop_add(&lo, parm); + break; + case LOOP_CTL_REMOVE: + ret = loop_lookup(&lo, parm); + if (ret < 0) + break; + mutex_lock(&lo->lo_ctl_mutex); + if (lo->lo_state != Lo_unbound) { + ret = -EBUSY; + mutex_unlock(&lo->lo_ctl_mutex); + break; + } + if (lo->lo_refcnt > 0) { + ret = -EBUSY; + mutex_unlock(&lo->lo_ctl_mutex); + break; + } + lo->lo_disk->private_data = NULL; + mutex_unlock(&lo->lo_ctl_mutex); + idr_remove(&loop_index_idr, lo->lo_number); + loop_remove(lo); + break; + case LOOP_CTL_GET_FREE: + ret = loop_lookup(&lo, -1); + if (ret >= 0) + break; + ret = loop_add(&lo, -1); + } + mutex_unlock(&loop_index_mutex); + + return ret; +} + +static const struct file_operations loop_ctl_fops = { + .open = nonseekable_open, + .unlocked_ioctl = loop_control_ioctl, + .compat_ioctl = loop_control_ioctl, + .owner = THIS_MODULE, + .llseek = 
noop_llseek, +}; + +static struct miscdevice loop_misc = { + .minor = LOOP_CTRL_MINOR, + .name = "loop-control", + .fops = &loop_ctl_fops, +}; + +MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); +MODULE_ALIAS("devname:loop-control"); + static int __init loop_init(void) { int i, nr; unsigned long range; struct loop_device *lo; + int err; /* * loop module now has a feature to instantiate underlying device @@ -1702,6 +1808,10 @@ static int __init loop_init(void) * device on-demand. */ + err = misc_register(&loop_misc); + if (err < 0) + return err; + part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); @@ -1767,6 +1877,8 @@ static void __exit loop_exit(void) blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); unregister_blkdev(LOOP_MAJOR, "loop"); + + misc_deregister(&loop_misc); } module_init(loop_init); diff --git a/include/linux/loop.h b/include/linux/loop.h index 5f08d18..683d698 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -160,4 +160,8 @@ int loop_unregister_transfer(int number); #define LOOP_CHANGE_FD 0x4C06 #define LOOP_SET_CAPACITY 0x4C07 +/* /dev/loop-control interface */ +#define LOOP_CTL_ADD 0x4C80 +#define LOOP_CTL_REMOVE 0x4C81 +#define LOOP_CTL_GET_FREE 0x4C82 #endif diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 18fd130..c309b1e 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -40,6 +40,7 @@ #define BTRFS_MINOR 234 #define AUTOFS_MINOR 235 #define MAPPER_CTRL_MINOR 236 +#define LOOP_CTRL_MINOR 237 #define MISC_DYNAMIC_MINOR 255 struct device; -- cgit v0.10.2 From d134b00b9acca3fb054d7c88a5f5d562ecbb42d1 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Sun, 31 Jul 2011 22:08:04 +0200 Subject: loop: add BLK_DEV_LOOP_MIN_COUNT=%i to allow distros 0 pre-allocated loop devices Instead of unconditionally creating a fixed number of dead loop devices which need to be investigated by storage handling services, even when they are never used, we allow distros to start with 0 loop devices and have losetup(8) and similar switch to the dynamic /dev/loop-control interface instead of searching /dev/loop%i for free devices. Signed-off-by: Kay Sievers Signed-off-by: Jens Axboe diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4ca9389..c328511 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1340,9 +1340,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. it is equivalent to "nosmp", which also disables the IO APIC. - max_loop= [LOOP] Maximum number of loopback devices that can - be mounted - Format: <1-256> + max_loop= [LOOP] The number of loop block devices that get + (loop.max_loop) unconditionally pre-created at init time. The default + number is configured by BLK_DEV_LOOP_MIN_COUNT. Instead + of statically allocating a predefined number, loop + devices can be requested on-demand with the + /dev/loop-control interface. mcatest= [IA-64] diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 717d6e4..57212c5 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -256,6 +256,21 @@ config BLK_DEV_LOOP Most users will answer N here. +config BLK_DEV_LOOP_MIN_COUNT + int "Number of loop devices to pre-create at init time" + depends on BLK_DEV_LOOP + default 8 + help + Static number of loop devices to be unconditionally pre-created + at init time. + + This default value can be overridden on the kernel command + line or with module-parameter loop.max_loop. + + The historic default is 8.
If a late 2011 version of losetup(8) + is used, it can be set to 0, since needed loop devices can be + dynamically allocated with the /dev/loop-control interface. + config BLK_DEV_CRYPTOLOOP tristate "Cryptoloop Support" select CRYPTO diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5c9edf9..3defc52 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1793,21 +1793,6 @@ static int __init loop_init(void) struct loop_device *lo; int err; - /* - * loop module now has a feature to instantiate underlying device - * structure on-demand, provided that there is an access dev node. - * However, this will not work well with user space tool that doesn't - * know about such "feature". In order to not break any existing - * tool, we do the following: - * - * (1) if max_loop is specified, create that many upfront, and this - * also becomes a hard limit. - * (2) if max_loop is not specified, create 8 loop device on module - * load, user can further extend loop device by create dev node - * themselves and have kernel automatically instantiate actual - * device on-demand. - */ - err = misc_register(&loop_misc); if (err < 0) return err; @@ -1833,11 +1818,19 @@ static int __init loop_init(void) if (max_loop > 1UL << (MINORBITS - part_shift)) return -EINVAL; + /* + * If max_loop is specified, create that many devices upfront. + * This also becomes a hard limit. If max_loop is not specified, + * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module + * init time. Loop devices can be requested on-demand with the + * /dev/loop-control interface, or be instantiated by accessing + * a 'dead' device node. + */ if (max_loop) { nr = max_loop; range = max_loop << part_shift; } else { - nr = 8; + nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT; range = 1UL << MINORBITS; } @@ -1847,7 +1840,7 @@ static int __init loop_init(void) blk_register_region(MKDEV(LOOP_MAJOR, 0), range, THIS_MODULE, loop_probe, NULL, NULL); - /* pre-create number devices of devices given by config or max_loop */ + /* pre-create number of devices given by config or max_loop */ mutex_lock(&loop_index_mutex); for (i = 0; i < nr; i++) loop_add(&lo, i); -- cgit v0.10.2 From 05eb0f252b04aa94ace0794f73d56c6a02351d80 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Sun, 31 Jul 2011 22:21:35 +0200 Subject: loop: fix deadlock when sysfs and LOOP_CLR_FD race against each other LOOP_CLR_FD takes lo->lo_ctl_mutex and tries to remove the loop sysfs files. Sysfs calls show() and waits for lo->lo_ctl_mutex. LOOP_CLR_FD waits for show() to finish to remove the sysfs file. cat /sys/class/block/loop0/loop/backing_file mutex_lock_nested+0x176/0x350 ? loop_attr_do_show_backing_file+0x2f/0xd0 [loop] ? loop_attr_do_show_backing_file+0x2f/0xd0 [loop] loop_attr_do_show_backing_file+0x2f/0xd0 [loop] dev_attr_show+0x1b/0x60 ? sysfs_read_file+0x86/0x1a0 ? __get_free_pages+0x12/0x50 sysfs_read_file+0xaf/0x1a0 ioctl(LOOP_CLR_FD): wait_for_common+0x12c/0x180 ? try_to_wake_up+0x2a0/0x2a0 wait_for_completion+0x18/0x20 sysfs_deactivate+0x178/0x180 ? sysfs_addrm_finish+0x43/0x70 ? sysfs_addrm_start+0x1d/0x20 sysfs_addrm_finish+0x43/0x70 sysfs_hash_and_remove+0x85/0xa0 sysfs_remove_group+0x59/0x100 loop_clr_fd+0x1dc/0x3f0 [loop] lo_ioctl+0x223/0x7a0 [loop] Instead of taking the lo_ctl_mutex from sysfs code, take the inner lo->lo_lock, to protect the access to the backing_file data. Thanks to Tejun for help debugging and finding a solution. 
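Condensed, the dependency cycle distilled from the traces above is: the sysfs read side holds an active sysfs reference and then blocks on lo_ctl_mutex, while the LOOP_CLR_FD side holds lo_ctl_mutex and then waits in sysfs_deactivate() for all active sysfs references to drop. Neither side can make progress until the other releases what it holds. Taking the inner lo->lo_lock in the show path breaks the cycle, since that lock is never held across the sysfs file removal.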
Cc: Milan Broz Cc: Tejun Heo Signed-off-by: Kay Sievers Cc: stable@kernel.org Signed-off-by: Jens Axboe diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3defc52..4720c7a 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -743,10 +743,10 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) ssize_t ret; char *p = NULL; - mutex_lock(&lo->lo_ctl_mutex); + spin_lock_irq(&lo->lo_lock); if (lo->lo_backing_file) p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); - mutex_unlock(&lo->lo_ctl_mutex); + spin_unlock_irq(&lo->lo_lock); if (IS_ERR_OR_NULL(p)) ret = PTR_ERR(p); @@ -1000,7 +1000,9 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) kthread_stop(lo->lo_thread); + spin_lock_irq(&lo->lo_lock); lo->lo_backing_file = NULL; + spin_unlock_irq(&lo->lo_lock); loop_release_xfer(lo); lo->transfer = NULL; -- cgit v0.10.2 From e5a94f56845bb4b272d82e84b5a1e2080b07ba82 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Mon, 1 Aug 2011 10:31:06 +0200 Subject: blk-throttle: correctly determine sync bio A read request is always sync. Use rw_is_sync() to determine whether a bio is sync. Signed-off-by: Shaohua Li Signed-off-by: Jens Axboe diff --git a/block/blk-throttle.c b/block/blk-throttle.c index f6a7941..a19f58c 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -746,7 +746,7 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg, static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); - bool sync = bio->bi_rw & REQ_SYNC; + bool sync = rw_is_sync(bio->bi_rw); /* Charge the bio to the group */ tg->bytes_disp[rw] += bio->bi_size; @@ -1150,7 +1150,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) if (tg_no_rule_group(tg, rw)) { blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, - rw, bio->bi_rw & REQ_SYNC); + rw, rw_is_sync(bio->bi_rw)); rcu_read_unlock(); return 0; } -- cgit v0.10.2 From 2012d9ca2a1381ae3e733330a7f0d1d2f1988bba Mon Sep 17 00:00:00 2001 From: "Mehnert, Torsten" Date: Mon, 1 Aug 2011 08:02:07 +0000 Subject: i.MX25 GPT clock fix: ensure the correct clock source Request for comment and commit. From: T. Mehnert Date: Mon, 4 Jul 2011 15:53:30 +0200 Subject: [PATCH] i.MX25 GPT clock fix: ensure the correct clock source This patch ensures that Linux will take the correct clock source (AHB_DIV) for gpt in the ARM i.MX25 implementation. The current code depends on the reset defaults of the CCM_MCR register. So on some boards it could happen that the UPLL is used as the clock source, which results in faulty time behavior in Linux. In this case all delays or sleeps will be a factor of 1.8 too long. Signed-off-by: Torsten Mehnert Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c index a65838f..e40c0b3 100644 --- a/arch/arm/mach-imx/clock-imx25.c +++ b/arch/arm/mach-imx/clock-imx25.c @@ -328,6 +328,9 @@ int __init mx25_clocks_init(void) __raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0), CRM_BASE + 0x64); + /* Clock source for gpt is ahb_div */ + __raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64); + mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); return 0; -- cgit v0.10.2 From 7bd89b4017f46a9b92853940fd9771319acb578a Mon Sep 17 00:00:00 2001 From: Sarah Sharp Date: Fri, 1 Jul 2011 13:35:40 -0700 Subject: xhci: Don't submit commands or URBs to halted hosts.
Commit fccf4e86200b8f5edd9a65da26f150e32ba79808 "USB: Free bandwidth when usb_disable_device is called" caused a bit of an issue when the xHCI host controller driver is unloaded. It changed the USB core to remove all endpoints when a USB device is disabled. When the driver is unloaded, it will remove the SuperSpeed split root hub, which will disable all devices under that roothub and then halt the host controller. When the second High Speed split roothub is removed, the USB core will attempt to disable the endpoints, which will submit a Configure Endpoint command to a halted host controller. The command will eventually time out, but it makes the xHCI driver unload take *minutes* if there are a couple of USB 1.1/2.0 devices attached. We must halt the host controller when the SuperSpeed roothub is removed, because we can't allow any interrupts from things like port status changes. Make several different functions not submit commands or URBs to the host controller when the host is halted, by adding a check in xhci_check_args(). xhci_check_args() is used by these functions: xhci.c-int xhci_urb_enqueue() xhci.c-int xhci_drop_endpoint() xhci.c-int xhci_add_endpoint() xhci.c-int xhci_check_bandwidth() xhci.c-void xhci_reset_bandwidth() xhci.c-static int xhci_check_streams_endpoint() xhci.c-int xhci_discover_or_reset_device() It's also used by xhci_free_dev(). However, we have to take special care in that case, because we want the device memory to be freed if the host controller is halted. This patch should be backported to the 2.6.39 and 3.0 kernel. Signed-off-by: Sarah Sharp Cc: stable@kernel.org diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 763f484..1c4432d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -345,7 +345,8 @@ static void xhci_event_ring_work(unsigned long arg) spin_lock_irqsave(&xhci->lock, flags); temp = xhci_readl(xhci, &xhci->op_regs->status); xhci_dbg(xhci, "op reg status = 0x%x\n", temp); - if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { + if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { xhci_dbg(xhci, "HW died, polling stopped.\n"); spin_unlock_irqrestore(&xhci->lock, flags); return; @@ -939,8 +940,11 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, return 0; } + xhci = hcd_to_xhci(hcd); + if (xhci->xhc_state & XHCI_STATE_HALTED) + return -ENODEV; + if (check_virt_dev) { - xhci = hcd_to_xhci(hcd); if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { printk(KERN_DEBUG "xHCI %s called with unaddressed " @@ -1242,7 +1246,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci_urb_free_priv(xhci, urb_priv); return ret; } - if (xhci->xhc_state & XHCI_STATE_DYING) { + if ((xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on " "non-responsive xHCI host.\n", urb->ep->desc.bEndpointAddress, urb); @@ -2665,7 +2670,10 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) int i, ret; ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); - if (ret <= 0) + /* If the host is halted due to driver unload, we still need to free the + * device. + */ + if (ret <= 0 && ret != -ENODEV) return; virt_dev = xhci->devs[udev->slot_id]; @@ -2679,7 +2687,8 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) spin_lock_irqsave(&xhci->lock, flags); /* Don't disable the slot if the host controller is dead. 
*/ state = xhci_readl(xhci, &xhci->op_regs->status); - if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { + if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || + (xhci->xhc_state & XHCI_STATE_HALTED)) { xhci_free_virt_device(xhci, udev->slot_id); spin_unlock_irqrestore(&xhci->lock, flags); return; -- cgit v0.10.2 From 6768458b17f9bf48a4c3a34e49b20344091b5f7e Mon Sep 17 00:00:00 2001 From: JiSheng Zhang Date: Sat, 16 Jul 2011 11:04:19 +0800 Subject: USB: xhci: fix OS want to own HC Software should set XHCI_HC_OS_OWNED bit to request ownership of xHC. This patch should be backported to kernels as far back as 2.6.31. Signed-off-by: JiSheng Zhang Signed-off-by: Sarah Sharp Cc: stable@kernel.org diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a9d3159..f4d1b69 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -817,7 +817,7 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) /* If the BIOS owns the HC, signal that the OS wants it, and wait */ if (val & XHCI_HC_BIOS_OWNED) { - writel(val & XHCI_HC_OS_OWNED, base + ext_cap_offset); + writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); /* Wait for 5 seconds with 10 microsecond polling interval */ timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, -- cgit v0.10.2 From 7de7c7d2cb49900e0b967be871bf695c7d6135c9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 29 Jul 2011 11:05:45 +0200 Subject: usb/config: use proper endian access for wMaxPacketSize wMaxPacketSize is __le16 and should be accessed as such. Also fix the wBytesPerInterval assignment while here. v2: also fix the wBytesPerInterval assigment, noticed by Matt Evans This patch should be backported to the 3.0 kernel. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Sarah Sharp Acked-by: Matt Evans Cc: stable@kernel.org diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index c962608..26678ca 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -123,10 +123,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, } if (usb_endpoint_xfer_isoc(&ep->desc)) - max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1) * - (desc->bmAttributes + 1); + max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) * + le16_to_cpu(ep->desc.wMaxPacketSize); else if (usb_endpoint_xfer_int(&ep->desc)) - max_tx = ep->desc.wMaxPacketSize * (desc->bMaxBurst + 1); + max_tx = le16_to_cpu(ep->desc.wMaxPacketSize) * + (desc->bMaxBurst + 1); else max_tx = 999999; if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { @@ -134,10 +135,10 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, "config %d interface %d altsetting %d ep %d: " "setting to %d\n", usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", - desc->wBytesPerInterval, + le16_to_cpu(desc->wBytesPerInterval), cfgno, inum, asnum, ep->desc.bEndpointAddress, max_tx); - ep->ss_ep_comp.wBytesPerInterval = max_tx; + ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); } } -- cgit v0.10.2 From 1c8007b0769d37aa5fcb343b383b0af89ade2f71 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Mon, 1 Aug 2011 12:41:00 -0500 Subject: jfs: flush journal completely before releasing metadata inodes This fixes a race during unmount. We need to not only make sure that the journal is completely written, but that the metadata changes make it to disk before releasing ipimap and ipbmap. 
Signed-off-by: Dave Kleikamp diff --git a/fs/jfs/jfs_umount.c b/fs/jfs/jfs_umount.c index adcf92d..7971f37 100644 --- a/fs/jfs/jfs_umount.c +++ b/fs/jfs/jfs_umount.c @@ -68,7 +68,7 @@ int jfs_umount(struct super_block *sb) /* * Wait for outstanding transactions to be written to log: */ - jfs_flush_journal(log, 1); + jfs_flush_journal(log, 2); /* * close fileset inode allocation map (aka fileset inode) @@ -146,7 +146,7 @@ int jfs_umount_rw(struct super_block *sb) * * remove file system from log active file system list. */ - jfs_flush_journal(log, 1); + jfs_flush_journal(log, 2); /* * Make sure all metadata makes it to disk -- cgit v0.10.2 From b03e7495a862b028294f59fc87286d6d78ee7fa1 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Wed, 20 Jul 2011 15:20:54 -0500 Subject: PCI: Set PCI-E Max Payload Size on fabric On a given PCI-E fabric, each device, bridge, and root port can have a different PCI-E maximum payload size. There is a sizable performance boost for having the largest possible maximum payload size on each PCI-E device. However, if improperly configured, fatal bus errors can occur. Thus, it is important to ensure that PCI-E payloads sent by a device are never larger than the MPS setting of all devices on the way to the destination. This can be achieved in two ways: - A conservative approach is to use the smallest common denominator of the entire tree below a root complex for every device on that fabric. This means for example that having a 128-byte MPS USB controller on one leg of a switch will dramatically reduce the performance of a video card or 10GE adapter on another leg of that same switch. It also means that any hierarchy supporting hotplug slots (presumably including ExpressCard or Thunderbolt) will have to be entirely clamped to 128 bytes since we cannot predict what will be plugged into those slots, and we cannot change the MPS on a "live" system. - A more optimal way is possible, if it falls within a couple of constraints: * The top-level host bridge will never generate packets larger than the smallest TLP (or if it can be controlled independently from its MPS at least) * The device will never generate packets larger than MPS (which can be configured via MRRS) * No support of direct PCI-E <-> PCI-E transfers between devices without some additional code to specifically deal with that case Then we can use an approach that basically ignores downstream requests and focuses exclusively on upstream requests. In that case, all we need to care about is that a device MPS is no larger than its parent MPS, which allows us to keep all switches/bridges to the max MPS supported by their parent and eventually the PHB. In this case, your USB controller would no longer "starve" your 10GE Ethernet and your hotplug slots won't affect your global MPS. Additionally, the hotplugged devices themselves can be configured to a larger MPS up to the value configured in the hotplug bridge. To choose between the two available options, two PCI kernel boot args have been added. "pcie_bus_safe" will provide the former behavior, while "pcie_bus_perf" will provide the latter. By default, the latter behavior is used. NOTE: due to the location of the enablement, each arch will need to add calls to this function. This patch only enables x86. This patch includes a number of changes recommended by Benjamin Herrenschmidt.
Tested-by: Jordan_Hargrave@dell.com Signed-off-by: Jon Mason Signed-off-by: Jesse Barnes diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index ae3cb23..c953302 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -360,6 +360,15 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root) } } + /* After the PCI-E bus has been walked and all devices discovered, + * configure any settings of the fabric that might be necessary. + */ + if (bus) { + struct pci_bus *child; + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child, child->self->pcie_mpss); + } + if (!bus) kfree(sd); diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 749fdf0..753b21a 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -158,47 +158,6 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) */ } -/* Program PCIE MaxPayload setting on device: ensure parent maxpayload <= device */ -static int pci_set_payload(struct pci_dev *dev) -{ - int pos, ppos; - u16 pctl, psz; - u16 dctl, dsz, dcap, dmax; - struct pci_dev *parent; - - parent = dev->bus->self; - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return 0; - - /* Read Device MaxPayload capability and setting */ - pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &dctl); - pci_read_config_word(dev, pos + PCI_EXP_DEVCAP, &dcap); - dsz = (dctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; - dmax = (dcap & PCI_EXP_DEVCAP_PAYLOAD); - - /* Read Parent MaxPayload setting */ - ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); - if (!ppos) - return 0; - pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl); - psz = (pctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; - - /* If parent payload > device max payload -> error - * If parent payload > device payload -> set speed - * If parent payload <= device payload -> do nothing - */ - if (psz > dmax) - return -1; - else if (psz > dsz) { - dev_info(&dev->dev, "Setting MaxPayload to %d\n", 128 << psz); - pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, - (dctl & ~PCI_EXP_DEVCTL_PAYLOAD) + - (psz << 5)); - } - return 0; -} - void pci_configure_slot(struct pci_dev *dev) { struct pci_dev *cdev; @@ -210,9 +169,7 @@ void pci_configure_slot(struct pci_dev *dev) (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) return; - ret = pci_set_payload(dev); - if (ret) - dev_warn(&dev->dev, "could not set device max payload\n"); + pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 08a95b3..466fad6 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -77,6 +77,8 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; +enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; + /* * The default CLS is used if arch didn't set CLS explicitly and not * all pci devices agree on the same value. Arch can override either @@ -3223,6 +3225,67 @@ out: EXPORT_SYMBOL(pcie_set_readrq); /** + * pcie_get_mps - get PCI Express maximum payload size + * @dev: PCI device to query + * + * Returns maximum payload size in bytes + * or appropriate error value. 
+ */ +int pcie_get_mps(struct pci_dev *dev) +{ + int ret, cap; + u16 ctl; + + cap = pci_pcie_cap(dev); + if (!cap) + return -EINVAL; + + ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); + if (!ret) + ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); + + return ret; +} + +/** + * pcie_set_mps - set PCI Express maximum payload size + * @dev: PCI device to query + * @rq: maximum payload size in bytes + * valid values are 128, 256, 512, 1024, 2048, 4096 + * + * If possible sets maximum payload size + */ +int pcie_set_mps(struct pci_dev *dev, int mps) +{ + int cap, err = -EINVAL; + u16 ctl, v; + + if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) + goto out; + + v = ffs(mps) - 8; + if (v > dev->pcie_mpss) + goto out; + v <<= 5; + + cap = pci_pcie_cap(dev); + if (!cap) + goto out; + + err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); + if (err) + goto out; + + if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) { + ctl &= ~PCI_EXP_DEVCTL_PAYLOAD; + ctl |= v; + err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl); + } +out: + return err; +} + +/** * pci_select_bars - Make BAR mask from the type of resource * @dev: the PCI device for which BAR mask is made * @flags: resource type mask to be selected @@ -3505,6 +3568,10 @@ static int __init pci_setup(char *str) pci_hotplug_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "hpmemsize=", 10)) { pci_hotplug_mem_size = memparse(str + 10, &str); + } else if (!strncmp(str, "pcie_bus_safe", 13)) { + pcie_bus_config = PCIE_BUS_SAFE; + } else if (!strncmp(str, "pcie_bus_perf", 13)) { + pcie_bus_config = PCIE_BUS_PERFORMANCE; } else { printk(KERN_ERR "PCI: Unknown option `%s'\n", str); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 795c902..5becf7c 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -856,6 +856,8 @@ void set_pcie_port_type(struct pci_dev *pdev) pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; + pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, ®16); + pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; } void set_pcie_hotplug_bridge(struct pci_dev *pdev) @@ -1326,6 +1328,149 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) return nr; } +static int pcie_find_smpss(struct pci_dev *dev, void *data) +{ + u8 *smpss = data; + + if (!pci_is_pcie(dev)) + return 0; + + /* For PCIE hotplug enabled slots not connected directly to a + * PCI-E root port, there can be problems when hotplugging + * devices. This is due to the possibility of hotplugging a + * device into the fabric with a smaller MPS that the devices + * currently running have configured. Modifying the MPS on the + * running devices could cause a fatal bus error due to an + * incoming frame being larger than the newly configured MPS. + * To work around this, the MPS for the entire fabric must be + * set to the minimum size. Any devices hotplugged into this + * fabric will have the minimum MPS set. If the PCI hotplug + * slot is directly connected to the root port and there are not + * other devices on the fabric (which seems to be the most + * common case), then this is not an issue and MPS discovery + * will occur as normal. 
+ */ + if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) || + dev->bus->self->pcie_type != PCI_EXP_TYPE_ROOT_PORT)) + *smpss = 0; + + if (*smpss > dev->pcie_mpss) + *smpss = dev->pcie_mpss; + + return 0; +} + +static void pcie_write_mps(struct pci_dev *dev, int mps) +{ + int rc, dev_mpss; + + dev_mpss = 128 << dev->pcie_mpss; + + if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { + if (dev->bus->self) { + dev_dbg(&dev->bus->dev, "Bus MPSS %d\n", + 128 << dev->bus->self->pcie_mpss); + + /* For "MPS Force Max", the assumption is made that + * downstream communication will never be larger than + * the MRRS. So, the MPS only needs to be configured + * for the upstream communication. This being the case, + * walk from the top down and set the MPS of the child + * to that of the parent bus. + */ + mps = 128 << dev->bus->self->pcie_mpss; + if (mps > dev_mpss) + dev_warn(&dev->dev, "MPS configured higher than" + " maximum supported by the device. If" + " a bus issue occurs, try running with" + " pci=pcie_bus_safe.\n"); + } + + dev->pcie_mpss = ffs(mps) - 8; + } + + rc = pcie_set_mps(dev, mps); + if (rc) + dev_err(&dev->dev, "Failed attempting to set the MPS\n"); +} + +static void pcie_write_mrrs(struct pci_dev *dev, int mps) +{ + int rc, mrrs; + + if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { + int dev_mpss = 128 << dev->pcie_mpss; + + /* For Max performance, the MRRS must be set to the largest + * supported value. However, it cannot be configured larger + * than the MPS the device or the bus can support. This assumes + * that the largest MRRS available on the device cannot be + * smaller than the device MPSS. + */ + mrrs = mps < dev_mpss ? mps : dev_mpss; + } else + /* In the "safe" case, configure the MRRS for fairness on the + * bus by making all devices have the same size + */ + mrrs = mps; + + + /* MRRS is an R/W register. Invalid values can be written, but a + * subsequent read will verify if the value is acceptable or not. + * If the MRRS value provided is not acceptable (e.g., too large), + * shrink the value until it is acceptable to the HW. + */ + while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { + rc = pcie_set_readrq(dev, mrrs); + if (rc) + dev_err(&dev->dev, "Failed attempting to set the MRRS\n"); + + mrrs /= 2; + } +} + +static int pcie_bus_configure_set(struct pci_dev *dev, void *data) +{ + int mps = 128 << *(u8 *)data; + + if (!pci_is_pcie(dev)) + return 0; + + dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", + pcie_get_mps(dev), 128 << dev->pcie_mpss, pcie_get_readrq(dev)); + + pcie_write_mps(dev, mps); + pcie_write_mrrs(dev, mps); + + dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n", + pcie_get_mps(dev), 128 << dev->pcie_mpss, pcie_get_readrq(dev)); + + return 0; +} + +/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down, + * parents then children fashion. If this changes, then this code will not + * work as designed.
+ */ +void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) +{ + u8 smpss = mpss; + + if (!bus->self) + return; + + if (!pci_is_pcie(bus->self)) + return; + + if (pcie_bus_config == PCIE_BUS_SAFE) { + pcie_find_smpss(bus->self, &smpss); + pci_walk_bus(bus, pcie_find_smpss, &smpss); + } + + pcie_bus_configure_set(bus->self, &smpss); + pci_walk_bus(bus, pcie_bus_configure_set, &smpss); +} + unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) { unsigned int devfn, pass, max = bus->secondary; diff --git a/include/linux/pci.h b/include/linux/pci.h index f27893b..1ff9bba 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -251,7 +251,8 @@ struct pci_dev { u8 revision; /* PCI revision, low byte of class word */ u8 hdr_type; /* PCI header type (`multi' flag masked out) */ u8 pcie_cap; /* PCI-E capability offset */ - u8 pcie_type; /* PCI-E device/port type */ + u8 pcie_type:4; /* PCI-E device/port type */ + u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */ u8 rom_base_reg; /* which config register controls the ROM */ u8 pin; /* which interrupt pin this device uses */ @@ -617,6 +618,16 @@ struct pci_driver { /* these external functions are only available when PCI support is enabled */ #ifdef CONFIG_PCI +extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); + +enum pcie_bus_config_types { + PCIE_BUS_PERFORMANCE, + PCIE_BUS_SAFE, + PCIE_BUS_PEER2PEER, +}; + +extern enum pcie_bus_config_types pcie_bus_config; + extern struct bus_type pci_bus_type; /* Do NOT directly access these two variables, unless you are arch specific pci @@ -796,6 +807,8 @@ int pcix_get_mmrbc(struct pci_dev *dev); int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); int pcie_get_readrq(struct pci_dev *dev); int pcie_set_readrq(struct pci_dev *dev, int rq); +int pcie_get_mps(struct pci_dev *dev); +int pcie_set_mps(struct pci_dev *dev, int mps); int __pci_reset_function(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); -- cgit v0.10.2 From be768912a49b10b68e96fbd8fa3cab0adfbd3091 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 25 Jul 2011 13:08:38 -0700 Subject: PCI: honor child buses add_size in hot plug configuration git commit c8adf9a3e873eddaaec11ac410a99ef6b9656938 "PCI: pre-allocate additional resources to devices only after successful allocation of essential resources." fails to take into consideration the optional resources needed by child devices while calculating the optional resources needed by the bridge. This can be a problem on some setups. For example, if a hotplug bridge has 8 child hotplug bridges, the bridge should have enough resources to accommodate the hotplug requirements for each of its child hotplug bridges. Currently this is not the case. This patch fixes the problem.
Signed-off-by: Yinghai Lu Reviewed-by: Ram Pai Signed-off-by: Jesse Barnes diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 8a1d3c7..4409cd0 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -540,6 +540,20 @@ static resource_size_t calculate_memsize(resource_size_t size, return size; } +static resource_size_t get_res_add_size(struct resource_list_x *add_head, + struct resource *res) +{ + struct resource_list_x *list; + + /* check if it is in add_head list */ + for (list = add_head->next; list && list->res != res; + list = list->next); + if (list) + return list->add_size; + + return 0; +} + /** * pbus_size_io() - size the io window of a given bus * @@ -559,6 +573,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, struct pci_dev *dev; struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); unsigned long size = 0, size0 = 0, size1 = 0; + resource_size_t children_add_size = 0; if (!b_res) return; @@ -579,10 +594,15 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, size += r_size; else size1 += r_size; + + if (add_head) + children_add_size += get_res_add_size(add_head, r); } } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); + if (children_add_size > add_size) + add_size = children_add_size; size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_iosize(size, min_size+add_size, size1, resource_size(b_res), 4096); @@ -624,6 +644,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, int order, max_order; struct resource *b_res = find_free_bus_resource(bus, type); unsigned int mem64_mask = 0; + resource_size_t children_add_size = 0; if (!b_res) return 0; @@ -665,6 +686,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (order > max_order) max_order = order; mem64_mask &= r->flags & IORESOURCE_MEM_64; + + if (add_head) + children_add_size += get_res_add_size(add_head, r); } } align = 0; @@ -681,6 +705,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); + if (children_add_size > add_size) + add_size = children_add_size; size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_memsize(size, min_size+add_size, 0, resource_size(b_res), min_align); -- cgit v0.10.2 From 2bbc6942273b5b3097bd265d82227bdd84b351b2 Mon Sep 17 00:00:00 2001 From: Ram Pai Date: Mon, 25 Jul 2011 13:08:39 -0700 Subject: PCI: ability to relocate assigned pci-resources Currently pci-bridges are allocated enough resources to satisfy their immediate requirements. Any additional resource-requests fail if additional free space, contiguous to the one already allocated, is not available. This behavior is not reasonable since sufficient contiguous resources that can satisfy the request are available at a different location. This patch provides the ability to expand and relocate an allocated resource. v2: Changelog: Fixed size calculation in pci_reassign_resource() v3: Changelog: Split this patch. The resource.c changes are already upstream. All the pci driver changes are in here.
Signed-off-by: Ram Pai Signed-off-by: Jesse Barnes diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 4409cd0..1796c6f 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -34,6 +34,7 @@ struct resource_list_x { resource_size_t start; resource_size_t end; resource_size_t add_size; + resource_size_t min_align; unsigned long flags; }; @@ -65,7 +66,7 @@ void pci_realloc(void) */ static void add_to_list(struct resource_list_x *head, struct pci_dev *dev, struct resource *res, - resource_size_t add_size) + resource_size_t add_size, resource_size_t min_align) { struct resource_list_x *list = head; struct resource_list_x *ln = list->next; @@ -84,13 +85,16 @@ static void add_to_list(struct resource_list_x *head, tmp->end = res->end; tmp->flags = res->flags; tmp->add_size = add_size; + tmp->min_align = min_align; list->next = tmp; } static void add_to_failed_list(struct resource_list_x *head, struct pci_dev *dev, struct resource *res) { - add_to_list(head, dev, res, 0); + add_to_list(head, dev, res, + 0 /* dont care */, + 0 /* dont care */); } static void __dev_sort_resources(struct pci_dev *dev, @@ -159,13 +163,16 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, idx = res - &list->dev->resource[0]; add_size=list->add_size; - if (!resource_size(res) && add_size) { - res->end = res->start + add_size - 1; - if(pci_assign_resource(list->dev, idx)) + if (!resource_size(res)) { + res->end = res->start + add_size - 1; + if(pci_assign_resource(list->dev, idx)) reset_resource(res); - } else if (add_size) { - adjust_resource(res, res->start, - resource_size(res) + add_size); + } else { + resource_size_t align = list->min_align; + res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); + if (pci_reassign_resource(list->dev, idx, add_size, align)) + dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n", + res); } out: tmp = list; @@ -619,7 +626,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, b_res->end = b_res->start + size0 - 1; b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && add_head) - add_to_list(add_head, bus->self, b_res, size1-size0); + add_to_list(add_head, bus->self, b_res, size1-size0, 4096); } /** @@ -722,7 +729,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->end = size0 + min_align - 1; b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; if (size1 > size0 && add_head) - add_to_list(add_head, bus->self, b_res, size1-size0); + add_to_list(add_head, bus->self, b_res, size1-size0, min_align); return 1; } diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 319f359..51a9095 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -128,16 +128,16 @@ void pci_disable_bridge_window(struct pci_dev *dev) } #endif /* CONFIG_PCI_QUIRKS */ + + static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, - int resno) + int resno, resource_size_t size, resource_size_t align) { struct resource *res = dev->resource + resno; - resource_size_t size, min, align; + resource_size_t min; int ret; - size = resource_size(res); min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; - align = pci_resource_alignment(dev, res); /* First, try exact prefetching match.. 
*/ ret = pci_bus_alloc_resource(bus, res, size, align, min, @@ -154,56 +154,101 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, pcibios_align_resource, dev); } + return ret; +} - if (ret < 0 && dev->fw_addr[resno]) { - struct resource *root, *conflict; - resource_size_t start, end; +static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, + int resno, resource_size_t size) +{ + struct resource *root, *conflict; + resource_size_t start, end; + int ret = 0; - /* - * If we failed to assign anything, let's try the address - * where firmware left it. That at least has a chance of - * working, which is better than just leaving it disabled. - */ + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + else + root = &iomem_resource; + + start = res->start; + end = res->end; + res->start = dev->fw_addr[resno]; + res->end = res->start + size - 1; + dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", + resno, res); + conflict = request_resource_conflict(root, res); + if (conflict) { + dev_info(&dev->dev, + "BAR %d: %pR conflicts with %s %pR\n", resno, + res, conflict->name, conflict); + res->start = start; + res->end = end; + ret = 1; + } + return ret; +} + +static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align) +{ + struct resource *res = dev->resource + resno; + struct pci_bus *bus; + int ret; + char *type; - if (res->flags & IORESOURCE_IO) - root = &ioport_resource; + bus = dev->bus; + while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { + if (!bus->parent || !bus->self->transparent) + break; + bus = bus->parent; + } + + if (ret) { + if (res->flags & IORESOURCE_MEM) + if (res->flags & IORESOURCE_PREFETCH) + type = "mem pref"; + else + type = "mem"; + else if (res->flags & IORESOURCE_IO) + type = "io"; else - root = &iomem_resource; - - start = res->start; - end = res->end; - res->start = dev->fw_addr[resno]; - res->end = res->start + size - 1; - dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n", - resno, res); - conflict = request_resource_conflict(root, res); - if (conflict) { - dev_info(&dev->dev, - "BAR %d: %pR conflicts with %s %pR\n", resno, - res, conflict->name, conflict); - res->start = start; - res->end = end; - } else - ret = 0; + type = "unknown"; + dev_info(&dev->dev, + "BAR %d: can't assign %s (size %#llx)\n", + resno, type, (unsigned long long) resource_size(res)); } + return ret; +} + +int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, + resource_size_t min_align) +{ + struct resource *res = dev->resource + resno; + resource_size_t new_size; + int ret; + + if (!res->parent) { + dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resouce %pR " + "\n", resno, res); + return -EINVAL; + } + + new_size = resource_size(res) + addsize + min_align; + ret = _pci_assign_resource(dev, resno, new_size, min_align); if (!ret) { res->flags &= ~IORESOURCE_STARTALIGN; dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); } - return ret; } int pci_assign_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; - resource_size_t align; + resource_size_t align, size; struct pci_bus *bus; int ret; - char *type; align = pci_resource_alignment(dev, res); if (!align) { @@ -213,34 +258,27 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } bus = dev->bus; 
- while ((ret = __pci_assign_resource(bus, dev, resno))) { - if (bus->parent && bus->self->transparent) - bus = bus->parent; - else - bus = NULL; - if (bus) - continue; - break; - } + size = resource_size(res); + ret = _pci_assign_resource(dev, resno, size, align); - if (ret) { - if (res->flags & IORESOURCE_MEM) - if (res->flags & IORESOURCE_PREFETCH) - type = "mem pref"; - else - type = "mem"; - else if (res->flags & IORESOURCE_IO) - type = "io"; - else - type = "unknown"; - dev_info(&dev->dev, - "BAR %d: can't assign %s (size %#llx)\n", - resno, type, (unsigned long long) resource_size(res)); - } + /* + * If we failed to assign anything, let's try the address + * where firmware left it. That at least has a chance of + * working, which is better than just leaving it disabled. + */ + if (ret < 0 && dev->fw_addr[resno]) + ret = pci_revert_fw_address(res, dev, resno, size); + if (!ret) { + res->flags &= ~IORESOURCE_STARTALIGN; + dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); + if (resno < PCI_BRIDGE_RESOURCES) + pci_update_resource(dev, resno); + } return ret; } + /* Sort resources by alignment */ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) { diff --git a/include/linux/pci.h b/include/linux/pci.h index 1ff9bba..8c230cb 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -813,6 +813,7 @@ int __pci_reset_function(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); +int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align); int pci_select_bars(struct pci_dev *dev, unsigned long flags); /* ROM control related routines */ -- cgit v0.10.2 From 2aceefcbd5a73059e5f52831817ec277e987440d Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 25 Jul 2011 13:08:40 -0700 Subject: PCI: make SRIOV resources optional From: Yinghai Lu Allocate resources to SRIOV BARs only after all other required resource-requests are satisfied. Don't retry if resource allocation for SRIOV BARs fails. Signed-off-by: Ram Pai Signed-off-by: Yinghai Lu Signed-off-by: Jesse Barnes diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 1796c6f..1c19b9f 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -673,6 +673,16 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (r->parent || (r->flags & mask) != type) continue; r_size = resource_size(r); +#ifdef CONFIG_PCI_IOV + /* put SRIOV requested res to the optional list */ + if (add_head && i >= PCI_IOV_RESOURCES && + i <= PCI_IOV_RESOURCE_END) { + r->end = r->start - 1; + add_to_list(add_head, dev, r, r_size, 1); + children_add_size += r_size; + continue; + } +#endif /* For bridges size != alignment */ align = pci_resource_alignment(dev, r); order = __ffs(align) - 20; -- cgit v0.10.2 From 0a2daa1cf35004f5adbf4138555cc5669abf3a3e Mon Sep 17 00:00:00 2001 From: Ram Pai Date: Mon, 25 Jul 2011 13:08:41 -0700 Subject: PCI: make cardbus-bridge resources optional Allocate resources to the cardbus bridge only after all other genuine resource requests are satisfied. Don't retry if resource allocation for cardbus bridges fails.
Signed-off-by: Ram Pai Signed-off-by: Jesse Barnes diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index c8cee76..b74084e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -283,6 +283,8 @@ static inline int pci_iov_bus_range(struct pci_bus *bus) #endif /* CONFIG_PCI_IOV */ +extern unsigned long pci_cardbus_resource_alignment(struct resource *); + static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, struct resource *res) { @@ -292,6 +294,8 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) return pci_sriov_resource_alignment(dev, resno); #endif + if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS) + return pci_cardbus_resource_alignment(res); return resource_alignment(res); } diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 1c19b9f..29e7cc7 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -164,6 +164,7 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, idx = res - &list->dev->resource[0]; add_size=list->add_size; if (!resource_size(res)) { + res->start = list->start; res->end = res->start + add_size - 1; if(pci_assign_resource(list->dev, idx)) reset_resource(res); @@ -223,7 +224,7 @@ static void __assign_resources_sorted(struct resource_list *head, /* Satisfy the must-have resource requests */ assign_requested_resources_sorted(head, fail_head); - /* Try to satisfy any additional nice-to-have resource + /* Try to satisfy any additional optional resource requests */ if (add_head) adjust_resources_sorted(add_head, head); @@ -678,7 +679,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, if (add_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { r->end = r->start - 1; - add_to_list(add_head, dev, r, r_size, 1); + add_to_list(add_head, dev, r, r_size, 0/* dont' care */); children_add_size += r_size; continue; } @@ -743,7 +744,17 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, return 1; } -static void pci_bus_size_cardbus(struct pci_bus *bus) +unsigned long pci_cardbus_resource_alignment(struct resource *res) +{ + if (res->flags & IORESOURCE_IO) + return pci_cardbus_io_size; + if (res->flags & IORESOURCE_MEM) + return pci_cardbus_mem_size; + return 0; +} + +static void pci_bus_size_cardbus(struct pci_bus *bus, + struct resource_list_x *add_head) { struct pci_dev *bridge = bus->self; struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; @@ -754,12 +765,14 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) * a fixed amount of bus space for CardBus bridges. 
*/ b_res[0].start = 0; - b_res[0].end = pci_cardbus_io_size - 1; b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; + if (add_head) + add_to_list(add_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */); b_res[1].start = 0; - b_res[1].end = pci_cardbus_io_size - 1; b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; + if (add_head) + add_to_list(add_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */); /* * Check whether prefetchable memory is supported @@ -779,17 +792,27 @@ static void pci_bus_size_cardbus(struct pci_bus *bus) */ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { b_res[2].start = 0; - b_res[2].end = pci_cardbus_mem_size - 1; b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; + if (add_head) + add_to_list(add_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */); b_res[3].start = 0; - b_res[3].end = pci_cardbus_mem_size - 1; b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; + if (add_head) + add_to_list(add_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */); } else { b_res[3].start = 0; - b_res[3].end = pci_cardbus_mem_size * 2 - 1; b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; + if (add_head) + add_to_list(add_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */); } + + /* set the size of the resource to zero, so that the resource does not + * get assigned during required-resource allocation cycle but gets assigned + * during the optional-resource allocation cycle. + */ + b_res[0].start = b_res[1].start = b_res[2].start = b_res[3].start = 1; + b_res[0].end = b_res[1].end = b_res[2].end = b_res[3].end = 0; } void __ref __pci_bus_size_bridges(struct pci_bus *bus, @@ -806,7 +829,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_CARDBUS: - pci_bus_size_cardbus(b); + pci_bus_size_cardbus(b, add_head); break; case PCI_CLASS_BRIDGE_PCI: -- cgit v0.10.2 From 9e8bf93a7f416a3fa8fb6d76177d90e67bd45496 Mon Sep 17 00:00:00 2001 From: Ram Pai Date: Mon, 25 Jul 2011 13:08:42 -0700 Subject: PCI: code and comments cleanup a) adjust_resource_sorted() is now called reassign_resource_sorted() b) nice-to-have is now called optional c) add_list is now called realloc_list. Signed-off-by: Ram Pai Signed-off-by: Jesse Barnes diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 29e7cc7..784da9d 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -125,18 +125,18 @@ static inline void reset_resource(struct resource *res) } /** - * adjust_resources_sorted() - satisfy any additional resource requests + * reassign_resources_sorted() - satisfy any additional resource requests * - * @add_head : head of the list tracking requests requiring additional + * @realloc_head : head of the list tracking requests requiring additional * resources * @head : head of the list tracking requests with allocated * resources * - * Walk through each element of the add_head and try to procure + * Walk through each element of the realloc_head and try to procure * additional resources for the element, provided the element * is in the head list. 
*/ -static void adjust_resources_sorted(struct resource_list_x *add_head, +static void reassign_resources_sorted(struct resource_list_x *realloc_head, struct resource_list *head) { struct resource *res; @@ -145,8 +145,8 @@ static void adjust_resources_sorted(struct resource_list_x *add_head, resource_size_t add_size; int idx; - prev = add_head; - for (list = add_head->next; list;) { + prev = realloc_head; + for (list = realloc_head->next; list;) { res = list->res; /* skip resource that has been reset */ if (!res->flags) @@ -218,7 +218,7 @@ static void assign_requested_resources_sorted(struct resource_list *head, } static void __assign_resources_sorted(struct resource_list *head, - struct resource_list_x *add_head, + struct resource_list_x *realloc_head, struct resource_list_x *fail_head) { /* Satisfy the must-have resource requests */ @@ -226,8 +226,8 @@ static void __assign_resources_sorted(struct resource_list *head, /* Try to satisfy any additional optional resource requests */ - if (add_head) - adjust_resources_sorted(add_head, head); + if (realloc_head) + reassign_resources_sorted(realloc_head, head); free_list(resource_list, head); } @@ -243,7 +243,7 @@ static void pdev_assign_resources_sorted(struct pci_dev *dev, } static void pbus_assign_resources_sorted(const struct pci_bus *bus, - struct resource_list_x *add_head, + struct resource_list_x *realloc_head, struct resource_list_x *fail_head) { struct pci_dev *dev; @@ -253,7 +253,7 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus, list_for_each_entry(dev, &bus->devices, bus_list) __dev_sort_resources(dev, &head); - __assign_resources_sorted(&head, add_head, fail_head); + __assign_resources_sorted(&head, realloc_head, fail_head); } void pci_setup_cardbus(struct pci_bus *bus) @@ -548,13 +548,13 @@ static resource_size_t calculate_memsize(resource_size_t size, return size; } -static resource_size_t get_res_add_size(struct resource_list_x *add_head, +static resource_size_t get_res_add_size(struct resource_list_x *realloc_head, struct resource *res) { struct resource_list_x *list; - /* check if it is in add_head list */ - for (list = add_head->next; list && list->res != res; + /* check if it is in realloc_head list */ + for (list = realloc_head->next; list && list->res != res; list = list->next); if (list) return list->add_size; @@ -568,7 +568,7 @@ static resource_size_t get_res_add_size(struct resource_list_x *add_head, * @bus : the bus * @min_size : the minimum io window that must to be allocated * @add_size : additional optional io window - * @add_head : track the additional io window on this list + * @realloc_head : track the additional io window on this list * * Sizing the IO windows of the PCI-PCI bridge is trivial, * since these windows have 4K granularity and the IO ranges @@ -576,7 +576,7 @@ static resource_size_t get_res_add_size(struct resource_list_x *add_head, * We must be careful with the ISA aliasing though. 
*/ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, - resource_size_t add_size, struct resource_list_x *add_head) + resource_size_t add_size, struct resource_list_x *realloc_head) { struct pci_dev *dev; struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); @@ -603,15 +603,15 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, else size1 += r_size; - if (add_head) - children_add_size += get_res_add_size(add_head, r); + if (realloc_head) + children_add_size += get_res_add_size(realloc_head, r); } } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); if (children_add_size > add_size) add_size = children_add_size; - size1 = (!add_head || (add_head && !add_size)) ? size0 : + size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : calculate_iosize(size, min_size+add_size, size1, resource_size(b_res), 4096); if (!size0 && !size1) { @@ -626,8 +626,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, b_res->start = 4096; b_res->end = b_res->start + size0 - 1; b_res->flags |= IORESOURCE_STARTALIGN; - if (size1 > size0 && add_head) - add_to_list(add_head, bus->self, b_res, size1-size0, 4096); + if (size1 > size0 && realloc_head) + add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096); } /** @@ -636,7 +636,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, * @bus : the bus * @min_size : the minimum memory window that must to be allocated * @add_size : additional optional memory window - * @add_head : track the additional memory window on this list + * @realloc_head : track the additional memory window on this list * * Calculate the size of the bus and minimal alignment which * guarantees that all child resources fit in this size. @@ -644,7 +644,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type, resource_size_t min_size, resource_size_t add_size, - struct resource_list_x *add_head) + struct resource_list_x *realloc_head) { struct pci_dev *dev; resource_size_t min_align, align, size, size0, size1; @@ -676,10 +676,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, r_size = resource_size(r); #ifdef CONFIG_PCI_IOV /* put SRIOV requested res to the optional list */ - if (add_head && i >= PCI_IOV_RESOURCES && + if (realloc_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { r->end = r->start - 1; - add_to_list(add_head, dev, r, r_size, 0/* dont' care */); + add_to_list(realloc_head, dev, r, r_size, 0/* dont' care */); children_add_size += r_size; continue; } @@ -705,8 +705,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, max_order = order; mem64_mask &= r->flags & IORESOURCE_MEM_64; - if (add_head) - children_add_size += get_res_add_size(add_head, r); + if (realloc_head) + children_add_size += get_res_add_size(realloc_head, r); } } align = 0; @@ -725,7 +725,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); if (children_add_size > add_size) add_size = children_add_size; - size1 = (!add_head || (add_head && !add_size)) ? size0 : + size1 = (!realloc_head || (realloc_head && !add_size)) ? 
size0 : calculate_memsize(size, min_size+add_size, 0, resource_size(b_res), min_align); if (!size0 && !size1) { @@ -739,8 +739,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, b_res->start = min_align; b_res->end = size0 + min_align - 1; b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; - if (size1 > size0 && add_head) - add_to_list(add_head, bus->self, b_res, size1-size0, min_align); + if (size1 > size0 && realloc_head) + add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); return 1; } @@ -754,7 +754,7 @@ unsigned long pci_cardbus_resource_alignment(struct resource *res) } static void pci_bus_size_cardbus(struct pci_bus *bus, - struct resource_list_x *add_head) + struct resource_list_x *realloc_head) { struct pci_dev *bridge = bus->self; struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; @@ -766,13 +766,13 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, */ b_res[0].start = 0; b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; - if (add_head) - add_to_list(add_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */); + if (realloc_head) + add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, 0 /* dont care */); b_res[1].start = 0; b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN; - if (add_head) - add_to_list(add_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */); + if (realloc_head) + add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size, 0 /* dont care */); /* * Check whether prefetchable memory is supported @@ -793,18 +793,18 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { b_res[2].start = 0; b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN; - if (add_head) - add_to_list(add_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */); + if (realloc_head) + add_to_list(realloc_head, bridge, b_res+2, pci_cardbus_mem_size, 0 /* dont care */); b_res[3].start = 0; b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; - if (add_head) - add_to_list(add_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */); + if (realloc_head) + add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size, 0 /* dont care */); } else { b_res[3].start = 0; b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN; - if (add_head) - add_to_list(add_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */); + if (realloc_head) + add_to_list(realloc_head, bridge, b_res+3, pci_cardbus_mem_size * 2, 0 /* dont care */); } /* set the size of the resource to zero, so that the resource does not @@ -816,7 +816,7 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, } void __ref __pci_bus_size_bridges(struct pci_bus *bus, - struct resource_list_x *add_head) + struct resource_list_x *realloc_head) { struct pci_dev *dev; unsigned long mask, prefmask; @@ -829,12 +829,12 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_CARDBUS: - pci_bus_size_cardbus(b, add_head); + pci_bus_size_cardbus(b, realloc_head); break; case PCI_CLASS_BRIDGE_PCI: default: - __pci_bus_size_bridges(b, add_head); + __pci_bus_size_bridges(b, realloc_head); break; } } @@ -858,7 +858,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, * Follow thru */ default: - pbus_size_io(bus, 0, additional_io_size, add_head); + pbus_size_io(bus, 0, additional_io_size, realloc_head); /* If the bridge supports prefetchable range, size it separately. 
If it doesn't, or its prefetchable window has already been allocated by arch code, try @@ -866,11 +866,11 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, resources. */ mask = IORESOURCE_MEM; prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; - if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head)) + if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, realloc_head)) mask = prefmask; /* Success, size non-prefetch only. */ else additional_mem_size += additional_mem_size; - pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head); + pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, realloc_head); break; } } @@ -882,20 +882,20 @@ void __ref pci_bus_size_bridges(struct pci_bus *bus) EXPORT_SYMBOL(pci_bus_size_bridges); static void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct resource_list_x *add_head, + struct resource_list_x *realloc_head, struct resource_list_x *fail_head) { struct pci_bus *b; struct pci_dev *dev; - pbus_assign_resources_sorted(bus, add_head, fail_head); + pbus_assign_resources_sorted(bus, realloc_head, fail_head); list_for_each_entry(dev, &bus->devices, bus_list) { b = dev->subordinate; if (!b) continue; - __pci_bus_assign_resources(b, add_head, fail_head); + __pci_bus_assign_resources(b, realloc_head, fail_head); switch (dev->class >> 8) { case PCI_CLASS_BRIDGE_PCI: @@ -1105,7 +1105,7 @@ void __init pci_assign_unassigned_resources(void) { struct pci_bus *bus; - struct resource_list_x add_list; /* list of resources that + struct resource_list_x realloc_list; /* list of resources that want additional resources */ int tried_times = 0; enum release_type rel_type = leaf_only; @@ -1118,7 +1118,7 @@ pci_assign_unassigned_resources(void) head.next = NULL; - add_list.next = NULL; + realloc_list.next = NULL; pci_try_num = max_depth + 1; printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n", @@ -1128,12 +1128,12 @@ again: /* Depth first, calculate sizes and alignments of all subordinate buses. */ list_for_each_entry(bus, &pci_root_buses, node) - __pci_bus_size_bridges(bus, &add_list); + __pci_bus_size_bridges(bus, &realloc_list); /* Depth last, allocate resources and update the hardware. */ list_for_each_entry(bus, &pci_root_buses, node) - __pci_bus_assign_resources(bus, &add_list, &head); - BUG_ON(add_list.next); + __pci_bus_assign_resources(bus, &realloc_list, &head); + BUG_ON(realloc_list.next); tried_times++; /* any device complain? */ -- cgit v0.10.2 From 7e9d40f3a834a322f004b6629db55c22ae457d77 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 12 Jul 2011 19:05:29 +0800 Subject: usb: gadget: s3c2410_udc: fix unterminated platform_device_id table platform_device_id structures need a NULL terminating entry, add it. 
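For background, the terminating entry is what stops the platform bus when it walks an ID table; roughly (simplified from the kernel's matcher, details may differ):

static const struct platform_device_id *
match_id(const struct platform_device_id *id, const char *name)
{
	while (id->name[0]) {		/* the trailing { } has an empty name */
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;	/* without the sentinel, the walk runs off the table */
}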
Signed-off-by: Axel Lin Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c index 85c1b0d..8d31848 100644 --- a/drivers/usb/gadget/s3c2410_udc.c +++ b/drivers/usb/gadget/s3c2410_udc.c @@ -2060,6 +2060,7 @@ static int s3c2410_udc_resume(struct platform_device *pdev) static const struct platform_device_id s3c_udc_ids[] = { { "s3c2410-usbgadget", }, { "s3c2440-usbgadget", }, + { } }; MODULE_DEVICE_TABLE(platform, s3c_udc_ids); -- cgit v0.10.2 From 9c5ea0ea27f5dbf17e5c0580060518588f8df7d4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Jul 2011 21:47:01 +0200 Subject: usb: gadget: fusb300: remove #if 0 block The code in this block is unused and the Author is fine with removing: | These functions were used to debug unstable hw fifo while developing | fusb300. It's much more stable now. | So these functions can be removed. Cc: "Wendy Yuan-Hsin Chen" Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c index 24a9243..4ec888f 100644 --- a/drivers/usb/gadget/fusb300_udc.c +++ b/drivers/usb/gadget/fusb300_udc.c @@ -609,107 +609,6 @@ void fusb300_rdcxf(struct fusb300 *fusb300, } } -#if 0 -static void fusb300_dbg_fifo(struct fusb300_ep *ep, - u8 entry, u16 length) -{ - u32 reg; - u32 i = 0; - u32 j = 0; - - reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); - reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | - FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); - reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | - FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); - iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM); - - for (i = 0; i < (length >> 2); i++) { - if (i * 4 == 1024) - break; - reg = ioread32(ep->fusb300->reg + - FUSB300_OFFSET_BUFDBG_START + i * 4); - printk(KERN_DEBUG" 0x%-8x", reg); - j++; - if ((j % 4) == 0) - printk(KERN_DEBUG "\n"); - } - - if (length % 4) { - reg = ioread32(ep->fusb300->reg + - FUSB300_OFFSET_BUFDBG_START + i * 4); - printk(KERN_DEBUG " 0x%x\n", reg); - } - - if ((j % 4) != 0) - printk(KERN_DEBUG "\n"); - - fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, - FUSB300_GTM_TST_FIFO_DEG); -} - -static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep, - u8 entry, u16 length, u8 *golden) -{ - u32 reg; - u32 i = 0; - u32 golden_value; - u8 *tmp; - - tmp = golden; - - printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry); - - reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); - reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | - FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); - reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | - FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); - iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM); - - for (i = 0; i < (length >> 2); i++) { - if (i * 4 == 1024) - break; - golden_value = *tmp | *(tmp + 1) << 8 | - *(tmp + 2) << 16 | *(tmp + 3) << 24; - - reg = ioread32(ep->fusb300->reg + - FUSB300_OFFSET_BUFDBG_START + i*4); - - if (reg != golden_value) { - printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg + - FUSB300_OFFSET_BUFDBG_START + i*4)); - printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", - golden_value, reg); - } - tmp += 4; - } - - switch (length % 4) { - case 1: - golden_value = *tmp; - case 2: - golden_value = *tmp | *(tmp + 1) << 8; - case 3: - golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; - default: - break; - - reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4); - if (reg != 
golden_value) { - printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg + - FUSB300_OFFSET_BUFDBG_START + i*4)); - printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", - golden_value, reg); - } - } - - printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n"); - fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, - FUSB300_GTM_TST_FIFO_DEG); -} -#endif - static void fusb300_rdfifo(struct fusb300_ep *ep, struct fusb300_request *req, u32 length) -- cgit v0.10.2 From a8f21156a07cb0b11fddeab771e11ec7c08283c9 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Jul 2011 20:21:52 +0200 Subject: usb: gadget: composite: fix bMaxPacketSize for SuperSpeed For bMaxPacketSize0 we usually take what is specified in ep0->maxpacket. This is fine in most cases, however on SuperSpeed bMaxPacketSize0 specifies the exponent instead of the actual size in bytes. The only valid value on SS is 9 which denotes 512 bytes. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 5ef8779..aef4741 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1079,10 +1079,12 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) cdev->desc.bMaxPacketSize0 = cdev->gadget->ep0->maxpacket; if (gadget_is_superspeed(gadget)) { - if (gadget->speed >= USB_SPEED_SUPER) + if (gadget->speed >= USB_SPEED_SUPER) { cdev->desc.bcdUSB = cpu_to_le16(0x0300); - else + cdev->desc.bMaxPacketSize0 = 9; + } else { cdev->desc.bcdUSB = cpu_to_le16(0x0210); + } } value = min(w_length, (u16) sizeof cdev->desc); -- cgit v0.10.2 From 5574d5f036f6fe52618e450aa09f8f284fafaeeb Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Sun, 17 Jul 2011 18:28:00 +0300 Subject: usb: musb: tusb6010_omap: fix build failure: error: 'musb' undeclared CC drivers/usb/musb/tusb6010_omap.o drivers/usb/musb/tusb6010_omap.c: In function 'tusb_omap_use_shared_dmareq': drivers/usb/musb/tusb6010_omap.c:92: error: 'musb' undeclared (first use in this function) drivers/usb/musb/tusb6010_omap.c:92: error: (Each undeclared identifier is reported only once drivers/usb/musb/tusb6010_omap.c:92: error: for each function it appears in.) Signed-off-by: Sergei Trofimovich Signed-off-by: Felipe Balbi diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index c784e6c..07c8a73 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c @@ -89,7 +89,7 @@ static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat) u32 reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP); if (reg != 0) { - dev_dbg(musb->controller, "ep%i dmareq0 is busy for ep%i\n", + dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n", chdat->epnum, reg & 0xf); return -EAGAIN; } -- cgit v0.10.2 From b61ae3427086ea413aa1fb35feea9e8c4d7c2584 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Mon, 18 Jul 2011 18:38:47 +0530 Subject: usb: musb: fix Kconfig After 622859634 (usb: musb: drop a gigantic amount of ifdeferry): - USB_GADGET_MUSB_HDRC is no longer selectable because it depends on the removed USB_MUSB_PERIPHERAL and USB_MUSB_OTG options - The Kconfig comment still says "Enable Host or Gadget support to see Inventra options", even though you now need to enable both of them to see Inventra options. Fix the dependency and drop the anyway unnecessary comment. 
Signed-off-by: Rabin Vincent Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 44b6b40..5a084b9 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -310,7 +310,7 @@ config USB_PXA_U2O # musb builds in ../musb along with host support config USB_GADGET_MUSB_HDRC tristate "Inventra HDRC USB Peripheral (TI, ADI, ...)" - depends on USB_MUSB_HDRC && (USB_MUSB_PERIPHERAL || USB_MUSB_OTG) + depends on USB_MUSB_HDRC select USB_GADGET_DUALSPEED help This OTG-capable silicon IP is used in dual designs including diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 6192b45..fc34b8b 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -3,9 +3,6 @@ # for silicon based on Mentor Graphics INVENTRA designs # -comment "Enable Host or Gadget support to see Inventra options" - depends on !USB && USB_GADGET=n - # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller config USB_MUSB_HDRC depends on USB && USB_GADGET -- cgit v0.10.2 From c240d78a8f9b1d4e1d391203f4a698117fae3474 Mon Sep 17 00:00:00 2001 From: Sebastian Bauer Date: Thu, 21 Jul 2011 15:40:07 +0200 Subject: usb: gadget: hid: don't STALL when processing a HID Descriptor request This is a patch to fix an issue with the HID gadget which, at the moment, returns STALL on a HID descriptor request. Essentially, the patch changes the hid gadget such that a request for the HID descriptor is handled by copying the descriptor into the response buffer, rather than falling through the default case, in which the request is answered by a STALL. Signed-off-by: Sebastian Bauer Acked-by: Peter Korsgaard Signed-off-by: Felipe Balbi diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c index 403a48b..83a266b 100644 --- a/drivers/usb/gadget/f_hid.c +++ b/drivers/usb/gadget/f_hid.c @@ -367,6 +367,13 @@ static int hidg_setup(struct usb_function *f, case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 | USB_REQ_GET_DESCRIPTOR): switch (value >> 8) { + case HID_DT_HID: + VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); + length = min_t(unsigned short, length, + hidg_desc.bLength); + memcpy(req->buf, &hidg_desc, length); + goto respond; + break; case HID_DT_REPORT: VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); length = min_t(unsigned short, length, -- cgit v0.10.2 From d4aefec5daf51ebda90ebf0989598c206cf8e640 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 28 Jul 2011 22:59:53 +0800 Subject: usb: host: ehci-omap: fix .remove and failure handling path of .probe(v1) Disabling and putting the regulators, as well as iounmap(hcd->regs), are missing from .remove and from the failure handling path of .probe, so add them.
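The fix follows the usual probe() unwind ladder: release resources in reverse order of acquisition, on both the error path and in .remove. A schematic sketch, not the driver's exact code (acquire_regulators() is a hypothetical stand-in):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	regs = ioremap(res->start, resource_size(res));	/* step 1 */
	if (!regs)
		return -ENOMEM;
	ret = acquire_regulators(pdata);		/* step 2, hypothetical */
	if (ret)
		goto err_io;
	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);	/* step 3 */
	if (ret)
		goto err_enable;
	return 0;

err_enable:
	disable_put_regulator(pdata);	/* undo step 2 */
err_io:
	iounmap(regs);			/* undo step 1 */
	return ret;
}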
Signed-off-by: Ming Lei Acked-by: Alan Stern Tested-by: Keshava Munegowda Signed-off-by: Felipe Balbi diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 55a57c2..4524032 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -98,6 +98,18 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) } } +static void disable_put_regulator( + struct ehci_hcd_omap_platform_data *pdata) +{ + int i; + + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (pdata->regulator[i]) { + regulator_disable(pdata->regulator[i]); + regulator_put(pdata->regulator[i]); + } + } +} /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ @@ -231,9 +243,11 @@ err_add_hcd: omap_usbhs_disable(dev); err_enable: + disable_put_regulator(pdata); usb_put_hcd(hcd); err_io: + iounmap(regs); return ret; } @@ -253,6 +267,8 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) usb_remove_hcd(hcd); omap_usbhs_disable(dev); + disable_put_regulator(dev->platform_data); + iounmap(hcd->regs); usb_put_hcd(hcd); return 0; } -- cgit v0.10.2 From 72c487dfb94d02025fb7437dfe2314d836d5a9ab Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 20 Jul 2011 17:09:34 -0700 Subject: usb: musb: fix oops on musb_gadget_pullup An 'unhandled fault' is caused when a gadget driver calls usb_gadget_connect() while the USB cable isn't plugged into the OTG port. The fault is caused by an access to MUSB's memory space while its clock is turned off due to pm_runtime kicking in. In order to fix the fault, we enclose musb_gadget_pullup() with pm_runtime_get_sync() ... pm_runtime_put() calls to be sure we will always reach that path with the clock turned on. [ balbi@ti.com : simplified commit log; removed a few things which didn't belong there ] Cc: stable@kernel.org Reported-by: Zach Pfeffer Signed-off-by: John Stultz Signed-off-by: Felipe Balbi diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index b67a062..8c41a2e 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1698,6 +1698,8 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) is_on = !!is_on; + pm_runtime_get_sync(musb->controller); + /* NOTE: this assumes we are sensing vbus; we'd rather * not pullup unless the B-session is active. */ @@ -1707,6 +1709,9 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) musb_pullup(musb, is_on); } spin_unlock_irqrestore(&musb->lock, flags); + + pm_runtime_put(musb->controller); + return 0; } -- cgit v0.10.2 From 4a4c879904aa0cc64629e14a49b64fb3d149bf1a Mon Sep 17 00:00:00 2001 From: Dan Bastone Date: Sun, 31 Jul 2011 07:40:49 -0400 Subject: HID: add support for new revision of Apple aluminum keyboard Add USB device ids for the new revision (MB110LL/B) of Apple's wired aluminum keyboard. I have only confirmed that the ANSI version is correct - it is assumed that the ISO and JIS versions follow the standard numbering convention.
Signed-off-by: Dan Bastone Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index b85744f..18b3bc6 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -444,6 +444,12 @@ static const struct hid_device_id apple_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO), + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS), + .driver_data = APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO), diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1a5cf0c..242353d 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1340,6 +1340,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_JIS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index db63ccf..61c8809 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -109,6 +109,9 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 +#define USB_DEVICE_ID_APPLE_ALU_REVB_ANSI 0x024f +#define USB_DEVICE_ID_APPLE_ALU_REVB_ISO 0x0250 +#define USB_DEVICE_ID_APPLE_ALU_REVB_JIS 0x0251 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b -- cgit v0.10.2 From a5395b83b78f62ccf5e3af854aacd025c2a6e7b5 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Tue, 2 Aug 2011 09:24:09 +0200 Subject: cfq-iosched: Reduce linked group count upon group destruction CFQ keeps track of the number of groups which are linked on blkcg->blkg_list. This is useful to avoid races between the queue exit and cgroup exit code paths. So if, at request queue exit time, the linked group count is not zero, that means there are some groups out there which are yet to be deleted under an rcu read period, and the queue exit code should wait for one rcu period. In my previous patch I forgot to decrease the group count. So in the current form, nr_blkcg_linked_grps is always non-zero and we will always wait one rcu period (if BLK_CGROUP=y). The side effect of this is that it can increase boot time. I am surprised nobody has complained so far.
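In other words, the counter must mirror list membership exactly; schematically (simplified, not the actual cfq code):

static void cfq_link_group(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
	cfqd->nr_blkcg_linked_grps++;
}

static void cfq_unlink_group(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	hlist_del_init(&cfqg->cfqd_node);
	cfqd->nr_blkcg_linked_grps--;	/* the decrement this patch restores */
}

With the decrement missing, the count at queue exit is never zero, so the exit path always waits for an rcu grace period even when no groups remain.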
Signed-off-by: Vivek Goyal Signed-off-by: Jens Axboe diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 1f96ad6..6508345 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -1209,6 +1209,9 @@ static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg) hlist_del_init(&cfqg->cfqd_node); + BUG_ON(cfqd->nr_blkcg_linked_grps <= 0); + cfqd->nr_blkcg_linked_grps--; + /* * Put the reference taken at the time of creation so that when all * queues are gone, group can be destroyed. -- cgit v0.10.2 From e2a5429ff7947ad251310376384f449297b7492a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 2 Aug 2011 10:43:35 +0200 Subject: bsg-lib: add module.h include Due to conflicts with the moduleh tree in linux-next, we run into an include file mess. We really need export.h in that tree, but if we add module.h locally then the issue is easier to resolve. Reported-by: Stephen Rothwell Signed-off-by: Jens Axboe diff --git a/block/bsg-lib.c b/block/bsg-lib.c index f8c0a61..6690e6e 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -25,6 +25,7 @@ #include #include #include +#include <linux/module.h> #include /** -- cgit v0.10.2 From ddad9ef5826efdfbbdb67b13b46f30e43e46ec3e Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Tue, 2 Aug 2011 12:43:49 +0200 Subject: drivers/block/drbd/drbd_nl.c: use bitmap_parse instead of __bitmap_parse The buffer 'sc.cpu_mask' is a kernel buffer. If bitmap_parse is used instead of __bitmap_parse, the extra parameter that indicates a kernel buffer is not needed. Signed-off-by: H Hartley Sweeten Cc: Lars Ellenberg Cc: Philipp Reisner Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 515bcd9..0feab26 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1829,10 +1829,10 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n /* silently ignore cpu mask on UP kernel */ if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) { - err = __bitmap_parse(sc.cpu_mask, 32, 0, + err = bitmap_parse(sc.cpu_mask, 32, cpumask_bits(new_cpu_mask), nr_cpu_ids); if (err) { - dev_warn(DEV, "__bitmap_parse() failed with %d\n", err); + dev_warn(DEV, "bitmap_parse() failed with %d\n", err); retcode = ERR_CPU_MASK_PARSE; goto fail; } -- cgit v0.10.2 From aec9f377e4f235c47e27fd8a429555dfa2dda342 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 2 Aug 2011 12:43:50 +0200 Subject: drivers/cdrom/cdrom.c: relax check on dvd manufacturer value The report has an ISO which has a very long manufacturer ID. It seems that Linux is wrong, not the ISO maker. Relax the check for the length of this field: emit a warning and truncate the incoming data to 2048 bytes rather than rejecting the entire thing. dvd_manufact.value isn't null-terminated. I'm not even sure if it's a string. The kernel doesn't appear to use it anyway.
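The resulting check is the common validate-and-clamp pattern; schematically (value[] is the 2048-byte field of dvd_manufact, other details elided):

int len = buf[0] << 8 | buf[1];

if (len < 0) {
	ret = -EIO;			/* still reject outright nonsense */
} else {
	if (len > (int)sizeof(s->manufact.value)) {
		/* warn, then clamp instead of failing the whole ioctl */
		len = sizeof(s->manufact.value);
	}
	memcpy(s->manufact.value, &buf[4], len);
}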
Addresses https://bugzilla.kernel.org/show_bug.cgi?id=39062 Reported-by: Tested-by: Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 75fb965..f997c27 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, goto out; s->manufact.len = buf[0] << 8 | buf[1]; - if (s->manufact.len < 0 || s->manufact.len > 2048) { + if (s->manufact.len < 0) { cdinfo(CD_WARNING, "Received invalid manufacture info length" " (%d)\n", s->manufact.len); ret = -EIO; } else { + if (s->manufact.len > 2048) { + cdinfo(CD_WARNING, "Received invalid manufacture info " + "length (%d): truncating to 2048\n", + s->manufact.len); + s->manufact.len = 2048; + } memcpy(s->manufact.value, &buf[4], s->manufact.len); } -- cgit v0.10.2 From f95fe9cfb49f6e625fbb5888cae2ed6f3a276b89 Mon Sep 17 00:00:00 2001 From: Herbert Poetzl Date: Tue, 2 Aug 2011 12:43:50 +0200 Subject: block/genhd.c: remove useless cast in diskstats_show() Remove the (unsigned long long) cast in diskstats_show() and adjust the seq_printf() format string to 'unsigned long'. diskstats_show() uses part_stat_read() to get the stats, which either accesses the specified field in the struct disk_stats directly (non SMP) or sums up the per CPU values in a variable of the same type as the field, so in any case the result will have the same type and range as the specified field, which for all disk_stats entries is unsigned long. Also, for unsigned long ranges the output of %lu should be identical to that of %llu, so no change in the actual proc entry contents. Signed-off-by: Herbert Poetzl Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe diff --git a/block/genhd.c b/block/genhd.c index 5cb51c5..e2f6790 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1146,17 +1146,17 @@ static int diskstats_show(struct seq_file *seqf, void *v) cpu = part_stat_lock(); part_round_stats(cpu, hd); part_stat_unlock(); - seq_printf(seqf, "%4d %7d %s %lu %lu %llu " - "%u %lu %lu %llu %u %u %u %u\n", + seq_printf(seqf, "%4d %7d %s %lu %lu %lu " + "%u %lu %lu %lu %u %u %u %u\n", MAJOR(part_devt(hd)), MINOR(part_devt(hd)), disk_name(gp, hd->partno, buf), part_stat_read(hd, ios[READ]), part_stat_read(hd, merges[READ]), - (unsigned long long)part_stat_read(hd, sectors[READ]), + part_stat_read(hd, sectors[READ]), jiffies_to_msecs(part_stat_read(hd, ticks[READ])), part_stat_read(hd, ios[WRITE]), part_stat_read(hd, merges[WRITE]), - (unsigned long long)part_stat_read(hd, sectors[WRITE]), + part_stat_read(hd, sectors[WRITE]), jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])), part_in_flight(hd), jiffies_to_msecs(part_stat_read(hd, io_ticks)), -- cgit v0.10.2 From debc3b778508f59696ff188f0feca271dcbfa7d9 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Tue, 2 Aug 2011 00:01:18 -0500 Subject: PCI: export pcie_bus_configure_settings symbol pcie_bus_configure_settings needs to be exported if the PCI hotplug driver is being compiled as a module.
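For context, a symbol defined in built-in code is invisible to loadable modules unless it is exported; a minimal sketch of the pattern (the caller-side comment is illustrative):

/* built-in PCI core: definition plus export */
void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
	/* ... */
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

/* A hotplug driver built as a module can then call
 * pcie_bus_configure_settings() directly; without the export,
 * loading the module fails with an unknown-symbol error. */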
Reported-by: Stephen Rothwell Signed-off-by: Jon Mason Signed-off-by: Jesse Barnes diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5becf7c..8473727 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1470,6 +1470,7 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) pcie_bus_configure_set(bus->self, &smpss); pci_walk_bus(bus, pcie_bus_configure_set, &smpss); } +EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus) { -- cgit v0.10.2 From 891f692533c36a17f00d25d24e4ac44ef38c9e5c Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 17:53:54 +0000 Subject: Docs: MSI-HOWTO: Use the subjunctive, and change `can' to `may' Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 3f5e0b0..43ffff1 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -45,7 +45,7 @@ arrived in memory (this becomes more likely with devices behind PCI-PCI bridges). In order to ensure that all the data has arrived in memory, the interrupt handler must read a register on the device which raised the interrupt. PCI transaction ordering rules require that all the data -arrives in memory before the value can be returned from the register. +arrive in memory before the value may be returned from the register. Using MSIs avoids this problem as the interrupt-generating write cannot pass the data writes, so by the time the interrupt is raised, the driver knows that all the data has arrived in memory. -- cgit v0.10.2 From 4979de6efb5553505a595eadc1cf7c386ca1ddc6 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 19:52:56 +0000 Subject: Docs: MSI-HOWTO: Use present tense and streamline some wording Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 43ffff1..13f3a99 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -86,13 +86,13 @@ device. int pci_enable_msi(struct pci_dev *dev) -A successful call will allocate ONE interrupt to the device, regardless -of how many MSIs the device supports. The device will be switched from +A successful call allocates ONE interrupt to the device, regardless +of how many MSIs the device supports. The device is switched from pin-based interrupt mode to MSI mode. The dev->irq number is changed -to a new number which represents the message signaled interrupt. -This function should be called before the driver calls request_irq() -since enabling MSIs disables the pin-based IRQ and the driver will not -receive interrupts on the old interrupt. +to a new number which represents the message signaled interrupt; +consequently, this function should be called before the driver calls +request_irq(), because an MSI is delivered via a vector that is +different from the vector of a pin-based interrupt. 4.2.2 pci_enable_msi_block @@ -111,10 +111,10 @@ the device are in the range dev->irq to dev->irq + count - 1. If this function returns a negative number, it indicates an error and the driver should not attempt to request any more MSI interrupts for -this device. If this function returns a positive number, it will be -less than 'count' and indicate the number of interrupts that could have -been allocated. 
In neither case will the irq value have been -updated, nor will the device have been switched into MSI mode. +this device. If this function returns a positive number, it is +less than 'count' and indicates the number of interrupts that could have +been allocated. In neither case is the irq value updated or the device +switched into MSI mode. The device driver must decide what action to take if pci_enable_msi_block() returns a value less than the number asked for. @@ -124,7 +124,7 @@ again. Note that it is not guaranteed to succeed, even when the 'count' has been reduced to the value returned from a previous call to pci_enable_msi_block(). This is because there are multiple constraints on the number of vectors that can be allocated; pci_enable_msi_block() -will return as soon as it finds any constraint that doesn't allow the +returns as soon as it finds any constraint that doesn't allow the call to succeed. 4.2.3 pci_disable_msi @@ -139,8 +139,8 @@ device, so drivers should not cache the value of dev->irq. A device driver must always call free_irq() on the interrupt(s) for which it has called request_irq() before calling this function. -Failure to do so will result in a BUG_ON(), the device will be left with -MSI enabled and will leak its vector. +Failure to do so results in a BUG_ON(), leaving the device with +MSI enabled and thus leaking its vector. 4.3 Using MSI-X @@ -168,10 +168,10 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) Calling this function asks the PCI subsystem to allocate 'nvec' MSIs. The 'entries' argument is a pointer to an array of msix_entry structs which should be at least 'nvec' entries in size. On success, the -function will return 0 and the device will have been switched into -MSI-X interrupt mode. The 'vector' elements in each entry will have -been filled in with the interrupt number. The driver should then call -request_irq() for each 'vector' that it decides to use. +device is switched into MSI-X mode and the function returns 0. +The 'vector' member in each entry is populated with the interrupt number; +the driver should then call request_irq() for each 'vector' that it +decides to use. If this function returns a negative number, it indicates an error and the driver should not attempt to allocate any more MSI-X interrupts for @@ -219,8 +219,8 @@ the value of the 'vector' elements over a call to pci_disable_msix(). A device driver must always call free_irq() on the interrupt(s) for which it has called request_irq() before calling this function. -Failure to do so will result in a BUG_ON(), the device will be left with -MSI enabled and will leak its vector. +Failure to do so results in a BUG_ON(), leaving the device with +MSI-X enabled and thus leaking its vector. 4.3.3 The MSI-X Table @@ -235,7 +235,7 @@ If a device implements both MSI and MSI-X capabilities, it can run in either MSI mode or MSI-X mode but not both simultaneously. This is a requirement of the PCI spec, and it is enforced by the PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or -pci_enable_msix() when MSI is already enabled will result in an error. +pci_enable_msix() when MSI is already enabled results in an error. If a device driver wishes to switch between MSI and MSI-X at runtime, it must first quiesce the device, then switch it back to pin-interrupt mode, before calling pci_enable_msi() or pci_enable_msix() and resuming @@ -281,7 +281,7 @@ disabled to enabled and back again. 
Using 'lspci -v' (as root) may show some devices with "MSI", "Message Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities -has an 'Enable' flag which will be followed with either "+" (enabled) +has an 'Enable' flag which is followed with either "+" (enabled) or "-" (disabled). @@ -298,7 +298,7 @@ The PCI stack provides three ways to disable MSIs: Some host chipsets simply don't support MSIs properly. If we're lucky, the manufacturer knows this and has indicated it in the ACPI -FADT table. In this case, Linux will automatically disable MSIs. +FADT table. In this case, Linux automatically disables MSIs. Some boards don't include this information in the table and so we have to detect them ourselves. The complete list of these is found near the quirk_disable_all_msi() function in drivers/pci/quirks.c. -- cgit v0.10.2 From a2d4d50128279c67d4cf38061206cddc1fc37e75 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 20:03:28 +0000 Subject: Docs: MSI-HOWTO: `asked for' -> `requested' Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 13f3a99..867ed03 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -117,7 +117,7 @@ been allocated. In neither case is the irq value updated or the device switched into MSI mode. The device driver must decide what action to take if -pci_enable_msi_block() returns a value less than the number asked for. +pci_enable_msi_block() returns a value less than the number requested. Some devices can make use of fewer interrupts than the maximum they request; in this case the driver should call pci_enable_msi_block() again. Note that it is not guaranteed to succeed, even when the -- cgit v0.10.2 From 1d15afcc73004028f2870ede7a56d590e1ca8ca8 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 20:05:01 +0000 Subject: Docs: MSI-HOWTO: Streamline some wording Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 867ed03..faf37f9 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -118,8 +118,8 @@ switched into MSI mode. The device driver must decide what action to take if pci_enable_msi_block() returns a value less than the number requested. -Some devices can make use of fewer interrupts than the maximum they -request; in this case the driver should call pci_enable_msi_block() +For instance, the driver could still make use of fewer interrupts; +in this case the driver should call pci_enable_msi_block() again. Note that it is not guaranteed to succeed, even when the 'count' has been reduced to the value returned from a previous call to pci_enable_msi_block(). This is because there are multiple constraints -- cgit v0.10.2 From 263d8d57b3b2e2fbb4e79b7cda7ef3399add4fb7 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 21:28:00 +0000 Subject: Docs: MSI-HOWTO: Put the `because' subordinate clause first Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index faf37f9..1d7047a 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -137,8 +137,8 @@ interrupt number and frees the previously allocated message signaled interrupt(s). 
The interrupt may subsequently be assigned to another device, so drivers should not cache the value of dev->irq. -A device driver must always call free_irq() on the interrupt(s) -for which it has called request_irq() before calling this function. +Before calling this function, a device driver must always call free_irq() +on any interrupt for which it previously called request_irq(). Failure to do so results in a BUG_ON(), leaving the device with MSI enabled and thus leaking its vector. @@ -217,8 +217,8 @@ the previously allocated message signaled interrupts. The interrupts may subsequently be assigned to another device, so drivers should not cache the value of the 'vector' elements over a call to pci_disable_msix(). -A device driver must always call free_irq() on the interrupt(s) -for which it has called request_irq() before calling this function. +Before calling this function, a device driver must always call free_irq() +on any interrupt for which it previously called request_irq(). Failure to do so results in a BUG_ON(), leaving the device with MSI-X enabled and thus leaking its vector. -- cgit v0.10.2 From e4439236ef5ac8e51ce97d03df8ef3e6dc5c6d51 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 21:30:18 +0000 Subject: Docs: MSI-HOWTO: Offset modifier with a comma, and insert `yet' for emphasis Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 1d7047a..515396a 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -155,10 +155,10 @@ struct msix_entry { }; This allows for the device to use these interrupts in a sparse fashion; -for example it could use interrupts 3 and 1027 and allocate only a +for example, it could use interrupts 3 and 1027 and yet allocate only a two-element array. The driver is expected to fill in the 'entry' value -in each element of the array to indicate which entries it wants the kernel -to assign interrupts for. It is invalid to fill in two entries with the +in each element of the array to indicate for which entries the kernel +should assign interrupts; it is invalid to fill in two entries with the same number. 4.3.1 pci_enable_msix -- cgit v0.10.2 From ed737c1882c652f0b5a888df59895b5dc2d10cd7 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Mon, 18 Jul 2011 16:15:00 +0000 Subject: Docs: MSI-HOWTO: Insert `that' ... as per Randy Dunlap's wishes :-P Message-Id: <20110717114023.2b4cce91.rdunlap@xenotime.net> Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 515396a..c504f12 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -190,7 +190,7 @@ during the initialization phase. It is ideal if drivers can cope with a variable number of MSI-X interrupts, there are many reasons why the platform may not be able to provide the -exact number a driver asks for. +exact number that a driver asks for. 
A request loop to achieve that might look like: -- cgit v0.10.2 From 6457d9b350b3f4f2098984eee016c6c994b9c096 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 21:54:18 +0000 Subject: Docs: MSI-HOWTO: Move a sentence to another paragraph Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index c504f12..28d1cee 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -171,7 +171,8 @@ which should be at least 'nvec' entries in size. On success, the device is switched into MSI-X mode and the function returns 0. The 'vector' member in each entry is populated with the interrupt number; the driver should then call request_irq() for each 'vector' that it -decides to use. +decides to use. The device driver is responsible for keeping track of the +interrupts assigned to the MSI-X vectors so it can free them again later. If this function returns a negative number, it indicates an error and the driver should not attempt to allocate any more MSI-X interrupts for @@ -181,9 +182,7 @@ below. This function, in contrast with pci_enable_msi(), does not adjust dev->irq. The device will not generate interrupts for this interrupt -number once MSI-X is enabled. The device driver is responsible for -keeping track of the interrupts assigned to the MSI-X vectors so it can -free them again later. +number once MSI-X is enabled. Device drivers should normally call this function once per device during the initialization phase. -- cgit v0.10.2 From 5a84fc3162e06632ebea42cefe3b964299213d33 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 21:55:05 +0000 Subject: Docs: MSI-HOWTO: , -> ; Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 28d1cee..f533bc2 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -187,7 +187,7 @@ number once MSI-X is enabled. Device drivers should normally call this function once per device during the initialization phase. -It is ideal if drivers can cope with a variable number of MSI-X interrupts, +It is ideal if drivers can cope with a variable number of MSI-X interrupts; there are many reasons why the platform may not be able to provide the exact number that a driver asks for. -- cgit v0.10.2 From e6ffceb0ded9beeaddd9c246b3fec298c6b1f0c9 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Thu, 14 Jul 2011 23:30:47 +0000 Subject: Docs: MSI-HOWTO: API -> function Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index f533bc2..d9c8d98 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -211,7 +211,7 @@ static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec) void pci_disable_msix(struct pci_dev *dev) -This API should be used to undo the effect of pci_enable_msix(). It frees +This function should be used to undo the effect of pci_enable_msix(). It frees the previously allocated message signaled interrupts. The interrupts may subsequently be assigned to another device, so drivers should not cache the value of the 'vector' elements over a call to pci_disable_msix(). 
-- cgit v0.10.2 From e14bd7e614b57493e1cbefb8a06d3754bdd04e26 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Fri, 15 Jul 2011 03:12:13 +0000 Subject: Docs: MSI-HOWTO: Insert a comma Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index d9c8d98..c9cffaf 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -231,7 +231,7 @@ mask or unmask an interrupt, it should call disable_irq() / enable_irq(). 4.4 Handling devices implementing both MSI and MSI-X capabilities If a device implements both MSI and MSI-X capabilities, it can -run in either MSI mode or MSI-X mode but not both simultaneously. +run in either MSI mode or MSI-X mode, but not both simultaneously. This is a requirement of the PCI spec, and it is enforced by the PCI layer. Calling pci_enable_msi() when MSI-X is already enabled or pci_enable_msix() when MSI is already enabled results in an error. -- cgit v0.10.2 From 952df55b5a30913f4a5536b12ad09dd95c66d83f Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Fri, 15 Jul 2011 03:15:10 +0000 Subject: Docs: MSI-HOWTO: may -> might Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index c9cffaf..257628f 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -250,7 +250,7 @@ the MSI-X facilities in preference to the MSI facilities. As mentioned above, MSI-X supports any number of interrupts between 1 and 2048. In constrast, MSI is restricted to a maximum of 32 interrupts (and must be a power of two). In addition, the MSI interrupt vectors must -be allocated consecutively, so the system may not be able to allocate +be allocated consecutively, so the system might not be able to allocate as many vectors for MSI as it could for MSI-X. On some platforms, MSI interrupts must all be targeted at the same set of CPUs whereas MSI-X interrupts can all be targeted at different CPUs. -- cgit v0.10.2 From e6b85a1f8a56d3c9db0273b7e4aaab802dc07a9b Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Fri, 15 Jul 2011 03:25:44 +0000 Subject: Docs: MSI-HOWTO: Use `unknown ...' rather than `... know about.' Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 257628f..2322a57 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -316,7 +316,7 @@ Some bridges allow you to enable MSIs by changing some bits in their PCI configuration space (especially the Hypertransport chipsets such as the nVidia nForce and Serverworks HT2000). As with host chipsets, Linux mostly knows about them and automatically enables MSIs if it can. 
-If you have a bridge which Linux doesn't yet know about, you can enable +If you have a bridge unknown to Linux, you can enable MSIs in configuration space using whatever method you know works, then enable MSIs on that bridge by doing: -- cgit v0.10.2 From 1b8386f61241471c722fbdff48e3d1d97bfca8e6 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Fri, 15 Jul 2011 03:26:37 +0000 Subject: Docs: MSI-HOWTO: can -> could Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 2322a57..3b47278 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -326,7 +326,7 @@ where $bridge is the PCI address of the bridge you've enabled (eg 0000:00:0e.0). To disable MSIs, echo 0 instead of 1. Changing this value should be -done with caution as it can break interrupt handling for all devices +done with caution as it could break interrupt handling for all devices below this bridge. Again, please notify linux-pci@vger.kernel.org of any bridges that need -- cgit v0.10.2 From c2b65e181acb9a981c890489c0f9a04d8e1b91f9 Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Fri, 15 Jul 2011 03:27:22 +0000 Subject: Docs: MSI-HOWTO: Insert a comma Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 3b47278..67ed5d8 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -335,7 +335,7 @@ special handling. 5.3. Disabling MSIs on a single device Some devices are known to have faulty MSI implementations. Usually this -is handled in the individual device driver but occasionally it's necessary +is handled in the individual device driver, but occasionally it's necessary to handle this with a quirk. Some drivers have an option to disable use of MSI. While this is a convenient workaround for the driver author, it is not good practise, and should not be emulated. -- cgit v0.10.2 From 798c794df81e0a1af62c1d7e48b464f4096f3b9a Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Fri, 15 Jul 2011 03:29:04 +0000 Subject: Docs: MSI-HOWTO: MSI -> MSIs Signed-off-by: Michael Witten Acked-by: Matthew Wilcox Signed-off-by: Randy Dunlap diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt index 67ed5d8..53e6fca 100644 --- a/Documentation/PCI/MSI-HOWTO.txt +++ b/Documentation/PCI/MSI-HOWTO.txt @@ -349,7 +349,7 @@ for your machine. You should also check your .config to be sure you have enabled CONFIG_PCI_MSI. Then, 'lspci -t' gives the list of bridges above a device. Reading -/sys/bus/pci/devices/*/msi_bus will tell you whether MSI are enabled (1) +/sys/bus/pci/devices/*/msi_bus will tell you whether MSIs are enabled (1) or disabled (0). If 0 is found in any of the msi_bus files belonging to bridges between the PCI root and the device, MSIs are disabled. -- cgit v0.10.2 From bf0c0259c79b325fd4ea139f363366d319786ea2 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Tue, 2 Aug 2011 14:20:26 -0300 Subject: staging: fix zcache building zcache is only building tmem.c and not building zcache.c. To keep the module name, zcache.c must be renamed if symbols from tmem.c are to remain unexported. 
Signed-off-by: Thadeu Lima de Souza Cascardo Acked-by: Dan Magenheimer Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile index f5ec64f..60daa27 100644 --- a/drivers/staging/zcache/Makefile +++ b/drivers/staging/zcache/Makefile @@ -1,3 +1,3 @@ -zcache-y := tmem.o +zcache-y := zcache-main.o tmem.o obj-$(CONFIG_ZCACHE) += zcache.o diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c new file mode 100644 index 0000000..65a81a0 --- /dev/null +++ b/drivers/staging/zcache/zcache-main.c @@ -0,0 +1,1996 @@ +/* + * zcache.c + * + * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp. + * Copyright (c) 2010,2011, Nitin Gupta + * + * Zcache provides an in-kernel "host implementation" for transcendent memory + * and, thus indirectly, for cleancache and frontswap. Zcache includes two + * page-accessible memory [1] interfaces, both utilizing lzo1x compression: + * 1) "compression buddies" ("zbud") is used for ephemeral pages + * 2) xvmalloc is used for persistent pages. + * Xvmalloc (based on the TLSF allocator) has very low fragmentation + * so maximizes space efficiency, while zbud allows pairs (and potentially, + * in the future, more than a pair of) compressed pages to be closely linked + * so that reclaiming can be done via the kernel's physical-page-oriented + * "shrinker" interface. + * + * [1] For a definition of page-accessible memory (aka PAM), see: + * http://marc.info/?l=linux-mm&m=127811271605009 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "tmem.h" + +#include "../zram/xvmalloc.h" /* if built in drivers/staging */ + +#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP)) +#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP" +#endif +#ifdef CONFIG_CLEANCACHE +#include +#endif +#ifdef CONFIG_FRONTSWAP +#include +#endif + +#if 0 +/* this is more aggressive but may cause other problems? */ +#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN) +#else +#define ZCACHE_GFP_MASK \ + (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC) +#endif + +#define MAX_POOLS_PER_CLIENT 16 + +#define MAX_CLIENTS 16 +#define LOCAL_CLIENT ((uint16_t)-1) +struct zcache_client { + struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; + struct xv_pool *xvpool; + bool allocated; + atomic_t refcount; +}; + +static struct zcache_client zcache_host; +static struct zcache_client zcache_clients[MAX_CLIENTS]; + +static inline uint16_t get_client_id_from_client(struct zcache_client *cli) +{ + BUG_ON(cli == NULL); + if (cli == &zcache_host) + return LOCAL_CLIENT; + return cli - &zcache_clients[0]; +} + +static inline bool is_local_client(struct zcache_client *cli) +{ + return cli == &zcache_host; +} + +/********** + * Compression buddies ("zbud") provides for packing two (or, possibly + * in the future, more) compressed ephemeral pages into a single "raw" + * (physical) page and tracking them with data structures so that + * the raw pages can be easily reclaimed. + * + * A zbud page ("zbpg") is an aligned page containing a list_head, + * a lock, and two "zbud headers". The remainder of the physical + * page is divided up into aligned 64-byte "chunks" which contain + * the compressed data for zero, one, or two zbuds. 
Each zbpg + * resides on: (1) an "unused list" if it has no zbuds; (2) a + * "buddied" list if it is fully populated with two zbuds; or + * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks + * the one unbuddied zbud uses. The data inside a zbpg cannot be + * read or written unless the zbpg's lock is held. + */ + +#define ZBH_SENTINEL 0x43214321 +#define ZBPG_SENTINEL 0xdeadbeef + +#define ZBUD_MAX_BUDS 2 + +struct zbud_hdr { + uint16_t client_id; + uint16_t pool_id; + struct tmem_oid oid; + uint32_t index; + uint16_t size; /* compressed size in bytes, zero means unused */ + DECL_SENTINEL +}; + +struct zbud_page { + struct list_head bud_list; + spinlock_t lock; + struct zbud_hdr buddy[ZBUD_MAX_BUDS]; + DECL_SENTINEL + /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */ +}; + +#define CHUNK_SHIFT 6 +#define CHUNK_SIZE (1 << CHUNK_SHIFT) +#define CHUNK_MASK (~(CHUNK_SIZE-1)) +#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \ + CHUNK_MASK) >> CHUNK_SHIFT) +#define MAX_CHUNK (NCHUNKS-1) + +static struct { + struct list_head list; + unsigned count; +} zbud_unbuddied[NCHUNKS]; +/* list N contains pages with N chunks USED and NCHUNKS-N unused */ +/* element 0 is never used but optimizing that isn't worth it */ +static unsigned long zbud_cumul_chunk_counts[NCHUNKS]; + +struct list_head zbud_buddied_list; +static unsigned long zcache_zbud_buddied_count; + +/* protects the buddied list and all unbuddied lists */ +static DEFINE_SPINLOCK(zbud_budlists_spinlock); + +static LIST_HEAD(zbpg_unused_list); +static unsigned long zcache_zbpg_unused_list_count; + +/* protects the unused page list */ +static DEFINE_SPINLOCK(zbpg_unused_list_spinlock); + +static atomic_t zcache_zbud_curr_raw_pages; +static atomic_t zcache_zbud_curr_zpages; +static unsigned long zcache_zbud_curr_zbytes; +static unsigned long zcache_zbud_cumul_zpages; +static unsigned long zcache_zbud_cumul_zbytes; +static unsigned long zcache_compress_poor; +static unsigned long zcache_mean_compress_poor; + +/* forward references */ +static void *zcache_get_free_page(void); +static void zcache_free_page(void *p); + +/* + * zbud helper functions + */ + +static inline unsigned zbud_max_buddy_size(void) +{ + return MAX_CHUNK << CHUNK_SHIFT; +} + +static inline unsigned zbud_size_to_chunks(unsigned size) +{ + BUG_ON(size == 0 || size > zbud_max_buddy_size()); + return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; +} + +static inline int zbud_budnum(struct zbud_hdr *zh) +{ + unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1); + struct zbud_page *zbpg = NULL; + unsigned budnum = -1U; + int i; + + for (i = 0; i < ZBUD_MAX_BUDS; i++) + if (offset == offsetof(typeof(*zbpg), buddy[i])) { + budnum = i; + break; + } + BUG_ON(budnum == -1U); + return budnum; +} + +static char *zbud_data(struct zbud_hdr *zh, unsigned size) +{ + struct zbud_page *zbpg; + char *p; + unsigned budnum; + + ASSERT_SENTINEL(zh, ZBH); + budnum = zbud_budnum(zh); + BUG_ON(size == 0 || size > zbud_max_buddy_size()); + zbpg = container_of(zh, struct zbud_page, buddy[budnum]); + ASSERT_SPINLOCK(&zbpg->lock); + p = (char *)zbpg; + if (budnum == 0) + p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) & + CHUNK_MASK); + else if (budnum == 1) + p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK); + return p; +} + +/* + * zbud raw page management + */ + +static struct zbud_page *zbud_alloc_raw_page(void) +{ + struct zbud_page *zbpg = NULL; + struct zbud_hdr *zh0, *zh1; + bool recycled = 0; + + /* if any pages on the zbpg list, use one */ + 
spin_lock(&zbpg_unused_list_spinlock); + if (!list_empty(&zbpg_unused_list)) { + zbpg = list_first_entry(&zbpg_unused_list, + struct zbud_page, bud_list); + list_del_init(&zbpg->bud_list); + zcache_zbpg_unused_list_count--; + recycled = 1; + } + spin_unlock(&zbpg_unused_list_spinlock); + if (zbpg == NULL) + /* none on zbpg list, try to get a kernel page */ + zbpg = zcache_get_free_page(); + if (likely(zbpg != NULL)) { + INIT_LIST_HEAD(&zbpg->bud_list); + zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1]; + spin_lock_init(&zbpg->lock); + if (recycled) { + ASSERT_INVERTED_SENTINEL(zbpg, ZBPG); + SET_SENTINEL(zbpg, ZBPG); + BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid)); + BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid)); + } else { + atomic_inc(&zcache_zbud_curr_raw_pages); + INIT_LIST_HEAD(&zbpg->bud_list); + SET_SENTINEL(zbpg, ZBPG); + zh0->size = 0; zh1->size = 0; + tmem_oid_set_invalid(&zh0->oid); + tmem_oid_set_invalid(&zh1->oid); + } + } + return zbpg; +} + +static void zbud_free_raw_page(struct zbud_page *zbpg) +{ + struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1]; + + ASSERT_SENTINEL(zbpg, ZBPG); + BUG_ON(!list_empty(&zbpg->bud_list)); + ASSERT_SPINLOCK(&zbpg->lock); + BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid)); + BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid)); + INVERT_SENTINEL(zbpg, ZBPG); + spin_unlock(&zbpg->lock); + spin_lock(&zbpg_unused_list_spinlock); + list_add(&zbpg->bud_list, &zbpg_unused_list); + zcache_zbpg_unused_list_count++; + spin_unlock(&zbpg_unused_list_spinlock); +} + +/* + * core zbud handling routines + */ + +static unsigned zbud_free(struct zbud_hdr *zh) +{ + unsigned size; + + ASSERT_SENTINEL(zh, ZBH); + BUG_ON(!tmem_oid_valid(&zh->oid)); + size = zh->size; + BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); + zh->size = 0; + tmem_oid_set_invalid(&zh->oid); + INVERT_SENTINEL(zh, ZBH); + zcache_zbud_curr_zbytes -= size; + atomic_dec(&zcache_zbud_curr_zpages); + return size; +} + +static void zbud_free_and_delist(struct zbud_hdr *zh) +{ + unsigned chunks; + struct zbud_hdr *zh_other; + unsigned budnum = zbud_budnum(zh), size; + struct zbud_page *zbpg = + container_of(zh, struct zbud_page, buddy[budnum]); + + spin_lock(&zbpg->lock); + if (list_empty(&zbpg->bud_list)) { + /* ignore zombie page... see zbud_evict_pages() */ + spin_unlock(&zbpg->lock); + return; + } + size = zbud_free(zh); + ASSERT_SPINLOCK(&zbpg->lock); + zh_other = &zbpg->buddy[(budnum == 0) ? 
1 : 0]; + if (zh_other->size == 0) { /* was unbuddied: unlist and free */ + chunks = zbud_size_to_chunks(size) ; + spin_lock(&zbud_budlists_spinlock); + BUG_ON(list_empty(&zbud_unbuddied[chunks].list)); + list_del_init(&zbpg->bud_list); + zbud_unbuddied[chunks].count--; + spin_unlock(&zbud_budlists_spinlock); + zbud_free_raw_page(zbpg); + } else { /* was buddied: move remaining buddy to unbuddied list */ + chunks = zbud_size_to_chunks(zh_other->size) ; + spin_lock(&zbud_budlists_spinlock); + list_del_init(&zbpg->bud_list); + zcache_zbud_buddied_count--; + list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list); + zbud_unbuddied[chunks].count++; + spin_unlock(&zbud_budlists_spinlock); + spin_unlock(&zbpg->lock); + } +} + +static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id, + struct tmem_oid *oid, + uint32_t index, struct page *page, + void *cdata, unsigned size) +{ + struct zbud_hdr *zh0, *zh1, *zh = NULL; + struct zbud_page *zbpg = NULL, *ztmp; + unsigned nchunks; + char *to; + int i, found_good_buddy = 0; + + nchunks = zbud_size_to_chunks(size) ; + for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) { + spin_lock(&zbud_budlists_spinlock); + if (!list_empty(&zbud_unbuddied[i].list)) { + list_for_each_entry_safe(zbpg, ztmp, + &zbud_unbuddied[i].list, bud_list) { + if (spin_trylock(&zbpg->lock)) { + found_good_buddy = i; + goto found_unbuddied; + } + } + } + spin_unlock(&zbud_budlists_spinlock); + } + /* didn't find a good buddy, try allocating a new page */ + zbpg = zbud_alloc_raw_page(); + if (unlikely(zbpg == NULL)) + goto out; + /* ok, have a page, now compress the data before taking locks */ + spin_lock(&zbpg->lock); + spin_lock(&zbud_budlists_spinlock); + list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list); + zbud_unbuddied[nchunks].count++; + zh = &zbpg->buddy[0]; + goto init_zh; + +found_unbuddied: + ASSERT_SPINLOCK(&zbpg->lock); + zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1]; + BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0))); + if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */ + ASSERT_SENTINEL(zh0, ZBH); + zh = zh1; + } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */ + ASSERT_SENTINEL(zh1, ZBH); + zh = zh0; + } else + BUG(); + list_del_init(&zbpg->bud_list); + zbud_unbuddied[found_good_buddy].count--; + list_add_tail(&zbpg->bud_list, &zbud_buddied_list); + zcache_zbud_buddied_count++; + +init_zh: + SET_SENTINEL(zh, ZBH); + zh->size = size; + zh->index = index; + zh->oid = *oid; + zh->pool_id = pool_id; + zh->client_id = client_id; + /* can wait to copy the data until the list locks are dropped */ + spin_unlock(&zbud_budlists_spinlock); + + to = zbud_data(zh, size); + memcpy(to, cdata, size); + spin_unlock(&zbpg->lock); + zbud_cumul_chunk_counts[nchunks]++; + atomic_inc(&zcache_zbud_curr_zpages); + zcache_zbud_cumul_zpages++; + zcache_zbud_curr_zbytes += size; + zcache_zbud_cumul_zbytes += size; +out: + return zh; +} + +static int zbud_decompress(struct page *page, struct zbud_hdr *zh) +{ + struct zbud_page *zbpg; + unsigned budnum = zbud_budnum(zh); + size_t out_len = PAGE_SIZE; + char *to_va, *from_va; + unsigned size; + int ret = 0; + + zbpg = container_of(zh, struct zbud_page, buddy[budnum]); + spin_lock(&zbpg->lock); + if (list_empty(&zbpg->bud_list)) { + /* ignore zombie page... 
see zbud_evict_pages() */ + ret = -EINVAL; + goto out; + } + ASSERT_SENTINEL(zh, ZBH); + BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); + to_va = kmap_atomic(page, KM_USER0); + size = zh->size; + from_va = zbud_data(zh, size); + ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len); + BUG_ON(ret != LZO_E_OK); + BUG_ON(out_len != PAGE_SIZE); + kunmap_atomic(to_va, KM_USER0); +out: + spin_unlock(&zbpg->lock); + return ret; +} + +/* + * The following routines handle shrinking of ephemeral pages by evicting + * pages "least valuable" first. + */ + +static unsigned long zcache_evicted_raw_pages; +static unsigned long zcache_evicted_buddied_pages; +static unsigned long zcache_evicted_unbuddied_pages; + +static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, + uint16_t poolid); +static void zcache_put_pool(struct tmem_pool *pool); + +/* + * Flush and free all zbuds in a zbpg, then free the pageframe + */ +static void zbud_evict_zbpg(struct zbud_page *zbpg) +{ + struct zbud_hdr *zh; + int i, j; + uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS]; + uint32_t index[ZBUD_MAX_BUDS]; + struct tmem_oid oid[ZBUD_MAX_BUDS]; + struct tmem_pool *pool; + + ASSERT_SPINLOCK(&zbpg->lock); + BUG_ON(!list_empty(&zbpg->bud_list)); + for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) { + zh = &zbpg->buddy[i]; + if (zh->size) { + client_id[j] = zh->client_id; + pool_id[j] = zh->pool_id; + oid[j] = zh->oid; + index[j] = zh->index; + j++; + zbud_free(zh); + } + } + spin_unlock(&zbpg->lock); + for (i = 0; i < j; i++) { + pool = zcache_get_pool_by_id(client_id[i], pool_id[i]); + if (pool != NULL) { + tmem_flush_page(pool, &oid[i], index[i]); + zcache_put_pool(pool); + } + } + ASSERT_SENTINEL(zbpg, ZBPG); + spin_lock(&zbpg->lock); + zbud_free_raw_page(zbpg); +} + +/* + * Free nr pages. This code is funky because we want to hold the locks + * protecting various lists for as short a time as possible, and in some + * circumstances the list may change asynchronously when the list lock is + * not held. In some cases we also trylock not only to avoid waiting on a + * page in use by another cpu, but also to avoid potential deadlock due to + * lock inversion. 
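+ * (A trylock that fails simply skips that page and the scan moves on,
+ * so eviction makes forward progress without ever spinning on a page
+ * that another cpu holds locked.)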
+ */ +static void zbud_evict_pages(int nr) +{ + struct zbud_page *zbpg; + int i; + + /* first try freeing any pages on unused list */ +retry_unused_list: + spin_lock_bh(&zbpg_unused_list_spinlock); + if (!list_empty(&zbpg_unused_list)) { + /* can't walk list here, since it may change when unlocked */ + zbpg = list_first_entry(&zbpg_unused_list, + struct zbud_page, bud_list); + list_del_init(&zbpg->bud_list); + zcache_zbpg_unused_list_count--; + atomic_dec(&zcache_zbud_curr_raw_pages); + spin_unlock_bh(&zbpg_unused_list_spinlock); + zcache_free_page(zbpg); + zcache_evicted_raw_pages++; + if (--nr <= 0) + goto out; + goto retry_unused_list; + } + spin_unlock_bh(&zbpg_unused_list_spinlock); + + /* now try freeing unbuddied pages, starting with least space avail */ + for (i = 0; i < MAX_CHUNK; i++) { +retry_unbud_list_i: + spin_lock_bh(&zbud_budlists_spinlock); + if (list_empty(&zbud_unbuddied[i].list)) { + spin_unlock_bh(&zbud_budlists_spinlock); + continue; + } + list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) { + if (unlikely(!spin_trylock(&zbpg->lock))) + continue; + list_del_init(&zbpg->bud_list); + zbud_unbuddied[i].count--; + spin_unlock(&zbud_budlists_spinlock); + zcache_evicted_unbuddied_pages++; + /* want budlists unlocked when doing zbpg eviction */ + zbud_evict_zbpg(zbpg); + local_bh_enable(); + if (--nr <= 0) + goto out; + goto retry_unbud_list_i; + } + spin_unlock_bh(&zbud_budlists_spinlock); + } + + /* as a last resort, free buddied pages */ +retry_bud_list: + spin_lock_bh(&zbud_budlists_spinlock); + if (list_empty(&zbud_buddied_list)) { + spin_unlock_bh(&zbud_budlists_spinlock); + goto out; + } + list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) { + if (unlikely(!spin_trylock(&zbpg->lock))) + continue; + list_del_init(&zbpg->bud_list); + zcache_zbud_buddied_count--; + spin_unlock(&zbud_budlists_spinlock); + zcache_evicted_buddied_pages++; + /* want budlists unlocked when doing zbpg eviction */ + zbud_evict_zbpg(zbpg); + local_bh_enable(); + if (--nr <= 0) + goto out; + goto retry_bud_list; + } + spin_unlock_bh(&zbud_budlists_spinlock); +out: + return; +} + +static void zbud_init(void) +{ + int i; + + INIT_LIST_HEAD(&zbud_buddied_list); + zcache_zbud_buddied_count = 0; + for (i = 0; i < NCHUNKS; i++) { + INIT_LIST_HEAD(&zbud_unbuddied[i].list); + zbud_unbuddied[i].count = 0; + } +} + +#ifdef CONFIG_SYSFS +/* + * These sysfs routines show a nice distribution of how many zbpg's are + * currently (and have ever been placed) in each unbuddied list. It's fun + * to watch but can probably go away before final merge. 
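+ * (Each of these show routines emits one space-separated count per
+ * possible chunk size, smallest first.)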
+ */ +static int zbud_show_unbuddied_list_counts(char *buf) +{ + int i; + char *p = buf; + + for (i = 0; i < NCHUNKS; i++) + p += sprintf(p, "%u ", zbud_unbuddied[i].count); + return p - buf; +} + +static int zbud_show_cumul_chunk_counts(char *buf) +{ + unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0; + unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0; + unsigned long total_chunks_lte_42 = 0; + char *p = buf; + + for (i = 0; i < NCHUNKS; i++) { + p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]); + chunks += zbud_cumul_chunk_counts[i]; + total_chunks += zbud_cumul_chunk_counts[i]; + sum_total_chunks += i * zbud_cumul_chunk_counts[i]; + if (i == 21) + total_chunks_lte_21 = total_chunks; + if (i == 32) + total_chunks_lte_32 = total_chunks; + if (i == 42) + total_chunks_lte_42 = total_chunks; + } + p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n", + total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42, + chunks == 0 ? 0 : sum_total_chunks / chunks); + return p - buf; +} +#endif + +/********** + * This "zv" PAM implementation combines the TLSF-based xvMalloc + * with lzo1x compression to maximize the amount of data that can + * be packed into a physical page. + * + * Zv represents a PAM page with the index and object (plus a "size" value + * necessary for decompression) immediately preceding the compressed data. + */ + +#define ZVH_SENTINEL 0x43214321 + +struct zv_hdr { + uint32_t pool_id; + struct tmem_oid oid; + uint32_t index; + DECL_SENTINEL +}; + +/* rudimentary policy limits */ +/* total number of persistent pages may not exceed this percentage */ +static unsigned int zv_page_count_policy_percent = 75; +/* + * byte count defining poor compression; pages with greater zsize will be + * rejected + */ +static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7; +/* + * byte count defining poor *mean* compression; pages with greater zsize + * will be rejected until sufficient better-compressed pages are accepted + * driving the man below this threshold + */ +static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5; + +static unsigned long zv_curr_dist_counts[NCHUNKS]; +static unsigned long zv_cumul_dist_counts[NCHUNKS]; + +static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id, + struct tmem_oid *oid, uint32_t index, + void *cdata, unsigned clen) +{ + struct page *page; + struct zv_hdr *zv = NULL; + uint32_t offset; + int alloc_size = clen + sizeof(struct zv_hdr); + int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT; + int ret; + + BUG_ON(!irqs_disabled()); + BUG_ON(chunks >= NCHUNKS); + ret = xv_malloc(xvpool, alloc_size, + &page, &offset, ZCACHE_GFP_MASK); + if (unlikely(ret)) + goto out; + zv_curr_dist_counts[chunks]++; + zv_cumul_dist_counts[chunks]++; + zv = kmap_atomic(page, KM_USER0) + offset; + zv->index = index; + zv->oid = *oid; + zv->pool_id = pool_id; + SET_SENTINEL(zv, ZVH); + memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen); + kunmap_atomic(zv, KM_USER0); +out: + return zv; +} + +static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv) +{ + unsigned long flags; + struct page *page; + uint32_t offset; + uint16_t size = xv_get_object_size(zv); + int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT; + + ASSERT_SENTINEL(zv, ZVH); + BUG_ON(chunks >= NCHUNKS); + zv_curr_dist_counts[chunks]--; + size -= sizeof(*zv); + BUG_ON(size == 0); + INVERT_SENTINEL(zv, ZVH); + page = virt_to_page(zv); + offset = (unsigned long)zv & ~PAGE_MASK; + local_irq_save(flags); + xv_free(xvpool, page, offset); + 
local_irq_restore(flags); +} + +static void zv_decompress(struct page *page, struct zv_hdr *zv) +{ + size_t clen = PAGE_SIZE; + char *to_va; + unsigned size; + int ret; + + ASSERT_SENTINEL(zv, ZVH); + size = xv_get_object_size(zv) - sizeof(*zv); + BUG_ON(size == 0); + to_va = kmap_atomic(page, KM_USER0); + ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv), + size, to_va, &clen); + kunmap_atomic(to_va, KM_USER0); + BUG_ON(ret != LZO_E_OK); + BUG_ON(clen != PAGE_SIZE); +} + +#ifdef CONFIG_SYSFS +/* + * show a distribution of compression stats for zv pages. + */ + +static int zv_curr_dist_counts_show(char *buf) +{ + unsigned long i, n, chunks = 0, sum_total_chunks = 0; + char *p = buf; + + for (i = 0; i < NCHUNKS; i++) { + n = zv_curr_dist_counts[i]; + p += sprintf(p, "%lu ", n); + chunks += n; + sum_total_chunks += i * n; + } + p += sprintf(p, "mean:%lu\n", + chunks == 0 ? 0 : sum_total_chunks / chunks); + return p - buf; +} + +static int zv_cumul_dist_counts_show(char *buf) +{ + unsigned long i, n, chunks = 0, sum_total_chunks = 0; + char *p = buf; + + for (i = 0; i < NCHUNKS; i++) { + n = zv_cumul_dist_counts[i]; + p += sprintf(p, "%lu ", n); + chunks += n; + sum_total_chunks += i * n; + } + p += sprintf(p, "mean:%lu\n", + chunks == 0 ? 0 : sum_total_chunks / chunks); + return p - buf; +} + +/* + * setting zv_max_zsize via sysfs causes all persistent (e.g. swap) + * pages that don't compress to less than this value (including metadata + * overhead) to be rejected. We don't allow the value to get too close + * to PAGE_SIZE. + */ +static ssize_t zv_max_zsize_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", zv_max_zsize); +} + +static ssize_t zv_max_zsize_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + err = strict_strtoul(buf, 10, &val); + if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7)) + return -EINVAL; + zv_max_zsize = val; + return count; +} + +/* + * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap) + * pages that don't compress to less than this value (including metadata + * overhead) to be rejected UNLESS the mean compression is also smaller + * than this value. In other words, we are load-balancing-by-zsize the + * accepted pages. Again, we don't allow the value to get too close + * to PAGE_SIZE. + */ +static ssize_t zv_max_mean_zsize_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", zv_max_mean_zsize); +} + +static ssize_t zv_max_mean_zsize_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + err = strict_strtoul(buf, 10, &val); + if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7)) + return -EINVAL; + zv_max_mean_zsize = val; + return count; +} + +/* + * setting zv_page_count_policy_percent via sysfs sets an upper bound of + * persistent (e.g. swap) pages that will be retained according to: + * (zv_page_count_policy_percent * totalram_pages) / 100) + * when that limit is reached, further puts will be rejected (until + * some pages have been flushed). Note that, due to compression, + * this number may exceed 100; it defaults to 75 and we set an + * arbitary limit of 150. A poor choice will almost certainly result + * in OOM's, so this value should only be changed prudently. 
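+ * Worked example: with 1GB of RAM (262144 4KB pages) and the default
+ * of 75, at most (75 * 262144) / 100 = 196608 compressed persistent
+ * pages are retained before further puts are rejected.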
+ */ +static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", zv_page_count_policy_percent); +} + +static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + int err; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + err = strict_strtoul(buf, 10, &val); + if (err || (val == 0) || (val > 150)) + return -EINVAL; + zv_page_count_policy_percent = val; + return count; +} + +static struct kobj_attribute zcache_zv_max_zsize_attr = { + .attr = { .name = "zv_max_zsize", .mode = 0644 }, + .show = zv_max_zsize_show, + .store = zv_max_zsize_store, +}; + +static struct kobj_attribute zcache_zv_max_mean_zsize_attr = { + .attr = { .name = "zv_max_mean_zsize", .mode = 0644 }, + .show = zv_max_mean_zsize_show, + .store = zv_max_mean_zsize_store, +}; + +static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = { + .attr = { .name = "zv_page_count_policy_percent", + .mode = 0644 }, + .show = zv_page_count_policy_percent_show, + .store = zv_page_count_policy_percent_store, +}; +#endif + +/* + * zcache core code starts here + */ + +/* useful stats not collected by cleancache or frontswap */ +static unsigned long zcache_flush_total; +static unsigned long zcache_flush_found; +static unsigned long zcache_flobj_total; +static unsigned long zcache_flobj_found; +static unsigned long zcache_failed_eph_puts; +static unsigned long zcache_failed_pers_puts; + +/* + * Tmem operations assume the poolid implies the invoking client. + * Zcache only has one client (the kernel itself): LOCAL_CLIENT. + * RAMster has each client numbered by cluster node, and a KVM version + * of zcache would have one client per guest and each client might + * have a poolid==N. + */ +static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid) +{ + struct tmem_pool *pool = NULL; + struct zcache_client *cli = NULL; + + if (cli_id == LOCAL_CLIENT) + cli = &zcache_host; + else { + if (cli_id >= MAX_CLIENTS) + goto out; + cli = &zcache_clients[cli_id]; + if (cli == NULL) + goto out; + atomic_inc(&cli->refcount); + } + if (poolid < MAX_POOLS_PER_CLIENT) { + pool = cli->tmem_pools[poolid]; + if (pool != NULL) + atomic_inc(&pool->refcount); + } +out: + return pool; +} + +static void zcache_put_pool(struct tmem_pool *pool) +{ + struct zcache_client *cli = NULL; + + if (pool == NULL) + BUG(); + cli = pool->client; + atomic_dec(&pool->refcount); + atomic_dec(&cli->refcount); +} + +int zcache_new_client(uint16_t cli_id) +{ + struct zcache_client *cli = NULL; + int ret = -1; + + if (cli_id == LOCAL_CLIENT) + cli = &zcache_host; + else if ((unsigned int)cli_id < MAX_CLIENTS) + cli = &zcache_clients[cli_id]; + if (cli == NULL) + goto out; + if (cli->allocated) + goto out; + cli->allocated = 1; +#ifdef CONFIG_FRONTSWAP + cli->xvpool = xv_create_pool(); + if (cli->xvpool == NULL) + goto out; +#endif + ret = 0; +out: + return ret; +} + +/* counters for debugging */ +static unsigned long zcache_failed_get_free_pages; +static unsigned long zcache_failed_alloc; +static unsigned long zcache_put_to_flush; +static unsigned long zcache_aborted_preload; +static unsigned long zcache_aborted_shrink; + +/* + * Ensure that memory allocation requests in zcache don't result + * in direct reclaim requests via the shrinker, which would cause + * an infinite loop. Maybe a GFP flag would be better? 
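+ * (Both zcache_do_preload() and shrink_zcache_memory() below only ever
+ * trylock it; if reclaim re-enters zcache while a preload holds the
+ * lock, the shrink attempt is simply aborted and counted.)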
+ */ +static DEFINE_SPINLOCK(zcache_direct_reclaim_lock); + +/* + * for now, used named slabs so can easily track usage; later can + * either just use kmalloc, or perhaps add a slab-like allocator + * to more carefully manage total memory utilization + */ +static struct kmem_cache *zcache_objnode_cache; +static struct kmem_cache *zcache_obj_cache; +static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0); +static unsigned long zcache_curr_obj_count_max; +static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0); +static unsigned long zcache_curr_objnode_count_max; + +/* + * to avoid memory allocation recursion (e.g. due to direct reclaim), we + * preload all necessary data structures so the hostops callbacks never + * actually do a malloc + */ +struct zcache_preload { + void *page; + struct tmem_obj *obj; + int nr; + struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH]; +}; +static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, }; + +static int zcache_do_preload(struct tmem_pool *pool) +{ + struct zcache_preload *kp; + struct tmem_objnode *objnode; + struct tmem_obj *obj; + void *page; + int ret = -ENOMEM; + + if (unlikely(zcache_objnode_cache == NULL)) + goto out; + if (unlikely(zcache_obj_cache == NULL)) + goto out; + if (!spin_trylock(&zcache_direct_reclaim_lock)) { + zcache_aborted_preload++; + goto out; + } + preempt_disable(); + kp = &__get_cpu_var(zcache_preloads); + while (kp->nr < ARRAY_SIZE(kp->objnodes)) { + preempt_enable_no_resched(); + objnode = kmem_cache_alloc(zcache_objnode_cache, + ZCACHE_GFP_MASK); + if (unlikely(objnode == NULL)) { + zcache_failed_alloc++; + goto unlock_out; + } + preempt_disable(); + kp = &__get_cpu_var(zcache_preloads); + if (kp->nr < ARRAY_SIZE(kp->objnodes)) + kp->objnodes[kp->nr++] = objnode; + else + kmem_cache_free(zcache_objnode_cache, objnode); + } + preempt_enable_no_resched(); + obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK); + if (unlikely(obj == NULL)) { + zcache_failed_alloc++; + goto unlock_out; + } + page = (void *)__get_free_page(ZCACHE_GFP_MASK); + if (unlikely(page == NULL)) { + zcache_failed_get_free_pages++; + kmem_cache_free(zcache_obj_cache, obj); + goto unlock_out; + } + preempt_disable(); + kp = &__get_cpu_var(zcache_preloads); + if (kp->obj == NULL) + kp->obj = obj; + else + kmem_cache_free(zcache_obj_cache, obj); + if (kp->page == NULL) + kp->page = page; + else + free_page((unsigned long)page); + ret = 0; +unlock_out: + spin_unlock(&zcache_direct_reclaim_lock); +out: + return ret; +} + +static void *zcache_get_free_page(void) +{ + struct zcache_preload *kp; + void *page; + + kp = &__get_cpu_var(zcache_preloads); + page = kp->page; + BUG_ON(page == NULL); + kp->page = NULL; + return page; +} + +static void zcache_free_page(void *p) +{ + free_page((unsigned long)p); +} + +/* + * zcache implementation for tmem host ops + */ + +static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool) +{ + struct tmem_objnode *objnode = NULL; + unsigned long count; + struct zcache_preload *kp; + + kp = &__get_cpu_var(zcache_preloads); + if (kp->nr <= 0) + goto out; + objnode = kp->objnodes[kp->nr - 1]; + BUG_ON(objnode == NULL); + kp->objnodes[kp->nr - 1] = NULL; + kp->nr--; + count = atomic_inc_return(&zcache_curr_objnode_count); + if (count > zcache_curr_objnode_count_max) + zcache_curr_objnode_count_max = count; +out: + return objnode; +} + +static void zcache_objnode_free(struct tmem_objnode *objnode, + struct tmem_pool *pool) +{ + atomic_dec(&zcache_curr_objnode_count); + 
BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0); + kmem_cache_free(zcache_objnode_cache, objnode); +} + +static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool) +{ + struct tmem_obj *obj = NULL; + unsigned long count; + struct zcache_preload *kp; + + kp = &__get_cpu_var(zcache_preloads); + obj = kp->obj; + BUG_ON(obj == NULL); + kp->obj = NULL; + count = atomic_inc_return(&zcache_curr_obj_count); + if (count > zcache_curr_obj_count_max) + zcache_curr_obj_count_max = count; + return obj; +} + +static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool) +{ + atomic_dec(&zcache_curr_obj_count); + BUG_ON(atomic_read(&zcache_curr_obj_count) < 0); + kmem_cache_free(zcache_obj_cache, obj); +} + +static struct tmem_hostops zcache_hostops = { + .obj_alloc = zcache_obj_alloc, + .obj_free = zcache_obj_free, + .objnode_alloc = zcache_objnode_alloc, + .objnode_free = zcache_objnode_free, +}; + +/* + * zcache implementations for PAM page descriptor ops + */ + +static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0); +static unsigned long zcache_curr_eph_pampd_count_max; +static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0); +static unsigned long zcache_curr_pers_pampd_count_max; + +/* forward reference */ +static int zcache_compress(struct page *from, void **out_va, size_t *out_len); + +static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, + struct tmem_pool *pool, struct tmem_oid *oid, + uint32_t index) +{ + void *pampd = NULL, *cdata; + size_t clen; + int ret; + unsigned long count; + struct page *page = virt_to_page(data); + struct zcache_client *cli = pool->client; + uint16_t client_id = get_client_id_from_client(cli); + unsigned long zv_mean_zsize; + unsigned long curr_pers_pampd_count; + + if (eph) { + ret = zcache_compress(page, &cdata, &clen); + if (ret == 0) + goto out; + if (clen == 0 || clen > zbud_max_buddy_size()) { + zcache_compress_poor++; + goto out; + } + pampd = (void *)zbud_create(client_id, pool->pool_id, oid, + index, page, cdata, clen); + if (pampd != NULL) { + count = atomic_inc_return(&zcache_curr_eph_pampd_count); + if (count > zcache_curr_eph_pampd_count_max) + zcache_curr_eph_pampd_count_max = count; + } + } else { + curr_pers_pampd_count = + atomic_read(&zcache_curr_pers_pampd_count); + if (curr_pers_pampd_count > + (zv_page_count_policy_percent * totalram_pages) / 100) + goto out; + ret = zcache_compress(page, &cdata, &clen); + if (ret == 0) + goto out; + /* reject if compression is too poor */ + if (clen > zv_max_zsize) { + zcache_compress_poor++; + goto out; + } + /* reject if mean compression is too poor */ + if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { + zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) / + curr_pers_pampd_count; + if (zv_mean_zsize > zv_max_mean_zsize) { + zcache_mean_compress_poor++; + goto out; + } + } + pampd = (void *)zv_create(cli->xvpool, pool->pool_id, + oid, index, cdata, clen); + if (pampd == NULL) + goto out; + count = atomic_inc_return(&zcache_curr_pers_pampd_count); + if (count > zcache_curr_pers_pampd_count_max) + zcache_curr_pers_pampd_count_max = count; + } +out: + return pampd; +} + +/* + * fill the pageframe corresponding to the struct page with the data + * from the passed pampd + */ +static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw, + void *pampd, struct tmem_pool *pool, + struct tmem_oid *oid, uint32_t index) +{ + int ret = 0; + + BUG_ON(is_ephemeral(pool)); + zv_decompress(virt_to_page(data), pampd); + return ret; +} + +/* + 
* fill the pageframe corresponding to the struct page with the data + * from the passed pampd + */ +static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw, + void *pampd, struct tmem_pool *pool, + struct tmem_oid *oid, uint32_t index) +{ + int ret = 0; + + BUG_ON(!is_ephemeral(pool)); + zbud_decompress(virt_to_page(data), pampd); + zbud_free_and_delist((struct zbud_hdr *)pampd); + atomic_dec(&zcache_curr_eph_pampd_count); + return ret; +} + +/* + * free the pampd and remove it from any zcache lists + * pampd must no longer be pointed to from any tmem data structures! + */ +static void zcache_pampd_free(void *pampd, struct tmem_pool *pool, + struct tmem_oid *oid, uint32_t index) +{ + struct zcache_client *cli = pool->client; + + if (is_ephemeral(pool)) { + zbud_free_and_delist((struct zbud_hdr *)pampd); + atomic_dec(&zcache_curr_eph_pampd_count); + BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0); + } else { + zv_free(cli->xvpool, (struct zv_hdr *)pampd); + atomic_dec(&zcache_curr_pers_pampd_count); + BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0); + } +} + +static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj) +{ +} + +static void zcache_pampd_new_obj(struct tmem_obj *obj) +{ +} + +static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj) +{ + return -1; +} + +static bool zcache_pampd_is_remote(void *pampd) +{ + return 0; +} + +static struct tmem_pamops zcache_pamops = { + .create = zcache_pampd_create, + .get_data = zcache_pampd_get_data, + .get_data_and_free = zcache_pampd_get_data_and_free, + .free = zcache_pampd_free, + .free_obj = zcache_pampd_free_obj, + .new_obj = zcache_pampd_new_obj, + .replace_in_obj = zcache_pampd_replace_in_obj, + .is_remote = zcache_pampd_is_remote, +}; + +/* + * zcache compression/decompression and related per-cpu stuff + */ + +#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS +#define LZO_DSTMEM_PAGE_ORDER 1 +static DEFINE_PER_CPU(unsigned char *, zcache_workmem); +static DEFINE_PER_CPU(unsigned char *, zcache_dstmem); + +static int zcache_compress(struct page *from, void **out_va, size_t *out_len) +{ + int ret = 0; + unsigned char *dmem = __get_cpu_var(zcache_dstmem); + unsigned char *wmem = __get_cpu_var(zcache_workmem); + char *from_va; + + BUG_ON(!irqs_disabled()); + if (unlikely(dmem == NULL || wmem == NULL)) + goto out; /* no buffer, so can't compress */ + from_va = kmap_atomic(from, KM_USER0); + mb(); + ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem); + BUG_ON(ret != LZO_E_OK); + *out_va = dmem; + kunmap_atomic(from_va, KM_USER0); + ret = 1; +out: + return ret; +} + + +static int zcache_cpu_notifier(struct notifier_block *nb, + unsigned long action, void *pcpu) +{ + int cpu = (long)pcpu; + struct zcache_preload *kp; + + switch (action) { + case CPU_UP_PREPARE: + per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages( + GFP_KERNEL | __GFP_REPEAT, + LZO_DSTMEM_PAGE_ORDER), + per_cpu(zcache_workmem, cpu) = + kzalloc(LZO1X_MEM_COMPRESS, + GFP_KERNEL | __GFP_REPEAT); + break; + case CPU_DEAD: + case CPU_UP_CANCELED: + free_pages((unsigned long)per_cpu(zcache_dstmem, cpu), + LZO_DSTMEM_PAGE_ORDER); + per_cpu(zcache_dstmem, cpu) = NULL; + kfree(per_cpu(zcache_workmem, cpu)); + per_cpu(zcache_workmem, cpu) = NULL; + kp = &per_cpu(zcache_preloads, cpu); + while (kp->nr) { + kmem_cache_free(zcache_objnode_cache, + kp->objnodes[kp->nr - 1]); + kp->objnodes[kp->nr - 1] = NULL; + kp->nr--; + } + kmem_cache_free(zcache_obj_cache, kp->obj); + free_page((unsigned 
long)kp->page); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block zcache_cpu_notifier_block = { + .notifier_call = zcache_cpu_notifier +}; + +#ifdef CONFIG_SYSFS +#define ZCACHE_SYSFS_RO(_name) \ + static ssize_t zcache_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%lu\n", zcache_##_name); \ + } \ + static struct kobj_attribute zcache_##_name##_attr = { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = zcache_##_name##_show, \ + } + +#define ZCACHE_SYSFS_RO_ATOMIC(_name) \ + static ssize_t zcache_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ + { \ + return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \ + } \ + static struct kobj_attribute zcache_##_name##_attr = { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = zcache_##_name##_show, \ + } + +#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \ + static ssize_t zcache_##_name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ + { \ + return _func(buf); \ + } \ + static struct kobj_attribute zcache_##_name##_attr = { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = zcache_##_name##_show, \ + } + +ZCACHE_SYSFS_RO(curr_obj_count_max); +ZCACHE_SYSFS_RO(curr_objnode_count_max); +ZCACHE_SYSFS_RO(flush_total); +ZCACHE_SYSFS_RO(flush_found); +ZCACHE_SYSFS_RO(flobj_total); +ZCACHE_SYSFS_RO(flobj_found); +ZCACHE_SYSFS_RO(failed_eph_puts); +ZCACHE_SYSFS_RO(failed_pers_puts); +ZCACHE_SYSFS_RO(zbud_curr_zbytes); +ZCACHE_SYSFS_RO(zbud_cumul_zpages); +ZCACHE_SYSFS_RO(zbud_cumul_zbytes); +ZCACHE_SYSFS_RO(zbud_buddied_count); +ZCACHE_SYSFS_RO(zbpg_unused_list_count); +ZCACHE_SYSFS_RO(evicted_raw_pages); +ZCACHE_SYSFS_RO(evicted_unbuddied_pages); +ZCACHE_SYSFS_RO(evicted_buddied_pages); +ZCACHE_SYSFS_RO(failed_get_free_pages); +ZCACHE_SYSFS_RO(failed_alloc); +ZCACHE_SYSFS_RO(put_to_flush); +ZCACHE_SYSFS_RO(aborted_preload); +ZCACHE_SYSFS_RO(aborted_shrink); +ZCACHE_SYSFS_RO(compress_poor); +ZCACHE_SYSFS_RO(mean_compress_poor); +ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages); +ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages); +ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count); +ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count); +ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts, + zbud_show_unbuddied_list_counts); +ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts, + zbud_show_cumul_chunk_counts); +ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts, + zv_curr_dist_counts_show); +ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts, + zv_cumul_dist_counts_show); + +static struct attribute *zcache_attrs[] = { + &zcache_curr_obj_count_attr.attr, + &zcache_curr_obj_count_max_attr.attr, + &zcache_curr_objnode_count_attr.attr, + &zcache_curr_objnode_count_max_attr.attr, + &zcache_flush_total_attr.attr, + &zcache_flobj_total_attr.attr, + &zcache_flush_found_attr.attr, + &zcache_flobj_found_attr.attr, + &zcache_failed_eph_puts_attr.attr, + &zcache_failed_pers_puts_attr.attr, + &zcache_compress_poor_attr.attr, + &zcache_mean_compress_poor_attr.attr, + &zcache_zbud_curr_raw_pages_attr.attr, + &zcache_zbud_curr_zpages_attr.attr, + &zcache_zbud_curr_zbytes_attr.attr, + &zcache_zbud_cumul_zpages_attr.attr, + &zcache_zbud_cumul_zbytes_attr.attr, + &zcache_zbud_buddied_count_attr.attr, + &zcache_zbpg_unused_list_count_attr.attr, + &zcache_evicted_raw_pages_attr.attr, + &zcache_evicted_unbuddied_pages_attr.attr, + &zcache_evicted_buddied_pages_attr.attr, + &zcache_failed_get_free_pages_attr.attr, 
+ &zcache_failed_alloc_attr.attr, + &zcache_put_to_flush_attr.attr, + &zcache_aborted_preload_attr.attr, + &zcache_aborted_shrink_attr.attr, + &zcache_zbud_unbuddied_list_counts_attr.attr, + &zcache_zbud_cumul_chunk_counts_attr.attr, + &zcache_zv_curr_dist_counts_attr.attr, + &zcache_zv_cumul_dist_counts_attr.attr, + &zcache_zv_max_zsize_attr.attr, + &zcache_zv_max_mean_zsize_attr.attr, + &zcache_zv_page_count_policy_percent_attr.attr, + NULL, +}; + +static struct attribute_group zcache_attr_group = { + .attrs = zcache_attrs, + .name = "zcache", +}; + +#endif /* CONFIG_SYSFS */ +/* + * When zcache is disabled ("frozen"), pools can be created and destroyed, + * but all puts (and thus all other operations that require memory allocation) + * must fail. If zcache is unfrozen, accepts puts, then frozen again, + * data consistency requires all puts while frozen to be converted into + * flushes. + */ +static bool zcache_freeze; + +/* + * zcache shrinker interface (only useful for ephemeral pages, so zbud only) + */ +static int shrink_zcache_memory(struct shrinker *shrink, + struct shrink_control *sc) +{ + int ret = -1; + int nr = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; + + if (nr >= 0) { + if (!(gfp_mask & __GFP_FS)) + /* does this case really need to be skipped? */ + goto out; + if (spin_trylock(&zcache_direct_reclaim_lock)) { + zbud_evict_pages(nr); + spin_unlock(&zcache_direct_reclaim_lock); + } else + zcache_aborted_shrink++; + } + ret = (int)atomic_read(&zcache_zbud_curr_raw_pages); +out: + return ret; +} + +static struct shrinker zcache_shrinker = { + .shrink = shrink_zcache_memory, + .seeks = DEFAULT_SEEKS, +}; + +/* + * zcache shims between cleancache/frontswap ops and tmem + */ + +static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp, + uint32_t index, struct page *page) +{ + struct tmem_pool *pool; + int ret = -1; + + BUG_ON(!irqs_disabled()); + pool = zcache_get_pool_by_id(cli_id, pool_id); + if (unlikely(pool == NULL)) + goto out; + if (!zcache_freeze && zcache_do_preload(pool) == 0) { + /* preload does preempt_disable on success */ + ret = tmem_put(pool, oidp, index, page_address(page), + PAGE_SIZE, 0, is_ephemeral(pool)); + if (ret < 0) { + if (is_ephemeral(pool)) + zcache_failed_eph_puts++; + else + zcache_failed_pers_puts++; + } + zcache_put_pool(pool); + preempt_enable_no_resched(); + } else { + zcache_put_to_flush++; + if (atomic_read(&pool->obj_count) > 0) + /* the put fails whether the flush succeeds or not */ + (void)tmem_flush_page(pool, oidp, index); + zcache_put_pool(pool); + } +out: + return ret; +} + +static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp, + uint32_t index, struct page *page) +{ + struct tmem_pool *pool; + int ret = -1; + unsigned long flags; + size_t size = PAGE_SIZE; + + local_irq_save(flags); + pool = zcache_get_pool_by_id(cli_id, pool_id); + if (likely(pool != NULL)) { + if (atomic_read(&pool->obj_count) > 0) + ret = tmem_get(pool, oidp, index, page_address(page), + &size, 0, is_ephemeral(pool)); + zcache_put_pool(pool); + } + local_irq_restore(flags); + return ret; +} + +static int zcache_flush_page(int cli_id, int pool_id, + struct tmem_oid *oidp, uint32_t index) +{ + struct tmem_pool *pool; + int ret = -1; + unsigned long flags; + + local_irq_save(flags); + zcache_flush_total++; + pool = zcache_get_pool_by_id(cli_id, pool_id); + if (likely(pool != NULL)) { + if (atomic_read(&pool->obj_count) > 0) + ret = tmem_flush_page(pool, oidp, index); + zcache_put_pool(pool); + } + if (ret >= 0) + 
zcache_flush_found++; + local_irq_restore(flags); + return ret; +} + +static int zcache_flush_object(int cli_id, int pool_id, + struct tmem_oid *oidp) +{ + struct tmem_pool *pool; + int ret = -1; + unsigned long flags; + + local_irq_save(flags); + zcache_flobj_total++; + pool = zcache_get_pool_by_id(cli_id, pool_id); + if (likely(pool != NULL)) { + if (atomic_read(&pool->obj_count) > 0) + ret = tmem_flush_object(pool, oidp); + zcache_put_pool(pool); + } + if (ret >= 0) + zcache_flobj_found++; + local_irq_restore(flags); + return ret; +} + +static int zcache_destroy_pool(int cli_id, int pool_id) +{ + struct tmem_pool *pool = NULL; + struct zcache_client *cli = NULL; + int ret = -1; + + if (pool_id < 0) + goto out; + if (cli_id == LOCAL_CLIENT) + cli = &zcache_host; + else if ((unsigned int)cli_id < MAX_CLIENTS) + cli = &zcache_clients[cli_id]; + if (cli == NULL) + goto out; + atomic_inc(&cli->refcount); + pool = cli->tmem_pools[pool_id]; + if (pool == NULL) + goto out; + cli->tmem_pools[pool_id] = NULL; + /* wait for pool activity on other cpus to quiesce */ + while (atomic_read(&pool->refcount) != 0) + ; + atomic_dec(&cli->refcount); + local_bh_disable(); + ret = tmem_destroy_pool(pool); + local_bh_enable(); + kfree(pool); + pr_info("zcache: destroyed pool id=%d, cli_id=%d\n", + pool_id, cli_id); +out: + return ret; +} + +static int zcache_new_pool(uint16_t cli_id, uint32_t flags) +{ + int poolid = -1; + struct tmem_pool *pool; + struct zcache_client *cli = NULL; + + if (cli_id == LOCAL_CLIENT) + cli = &zcache_host; + else if ((unsigned int)cli_id < MAX_CLIENTS) + cli = &zcache_clients[cli_id]; + if (cli == NULL) + goto out; + atomic_inc(&cli->refcount); + pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL); + if (pool == NULL) { + pr_info("zcache: pool creation failed: out of memory\n"); + goto out; + } + + for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++) + if (cli->tmem_pools[poolid] == NULL) + break; + if (poolid >= MAX_POOLS_PER_CLIENT) { + pr_info("zcache: pool creation failed: max exceeded\n"); + kfree(pool); + poolid = -1; + goto out; + } + atomic_set(&pool->refcount, 0); + pool->client = cli; + pool->pool_id = poolid; + tmem_new_pool(pool, flags); + cli->tmem_pools[poolid] = pool; + pr_info("zcache: created %s tmem pool, id=%d, client=%d\n", + flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral", + poolid, cli_id); +out: + if (cli != NULL) + atomic_dec(&cli->refcount); + return poolid; +} + +/********** + * Two kernel functionalities currently can be layered on top of tmem. + * These are "cleancache" which is used as a second-chance cache for clean + * page cache pages; and "frontswap" which is used for swap pages + * to avoid writes to disk. A generic "shim" is provided here for each + * to translate in-kernel semantics to zcache semantics. 
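+ * (Cleancache keys a page by pool id, file key and page index, which
+ * map directly onto a tmem pool, oid and index; frontswap keys a page
+ * by swap type and offset, which oswiz()/iswiz() below fold into oids
+ * and indices.)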
+ */ + +#ifdef CONFIG_CLEANCACHE +static void zcache_cleancache_put_page(int pool_id, + struct cleancache_filekey key, + pgoff_t index, struct page *page) +{ + u32 ind = (u32) index; + struct tmem_oid oid = *(struct tmem_oid *)&key; + + if (likely(ind == index)) + (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page); +} + +static int zcache_cleancache_get_page(int pool_id, + struct cleancache_filekey key, + pgoff_t index, struct page *page) +{ + u32 ind = (u32) index; + struct tmem_oid oid = *(struct tmem_oid *)&key; + int ret = -1; + + if (likely(ind == index)) + ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page); + return ret; +} + +static void zcache_cleancache_flush_page(int pool_id, + struct cleancache_filekey key, + pgoff_t index) +{ + u32 ind = (u32) index; + struct tmem_oid oid = *(struct tmem_oid *)&key; + + if (likely(ind == index)) + (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind); +} + +static void zcache_cleancache_flush_inode(int pool_id, + struct cleancache_filekey key) +{ + struct tmem_oid oid = *(struct tmem_oid *)&key; + + (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid); +} + +static void zcache_cleancache_flush_fs(int pool_id) +{ + if (pool_id >= 0) + (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id); +} + +static int zcache_cleancache_init_fs(size_t pagesize) +{ + BUG_ON(sizeof(struct cleancache_filekey) != + sizeof(struct tmem_oid)); + BUG_ON(pagesize != PAGE_SIZE); + return zcache_new_pool(LOCAL_CLIENT, 0); +} + +static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize) +{ + /* shared pools are unsupported and map to private */ + BUG_ON(sizeof(struct cleancache_filekey) != + sizeof(struct tmem_oid)); + BUG_ON(pagesize != PAGE_SIZE); + return zcache_new_pool(LOCAL_CLIENT, 0); +} + +static struct cleancache_ops zcache_cleancache_ops = { + .put_page = zcache_cleancache_put_page, + .get_page = zcache_cleancache_get_page, + .flush_page = zcache_cleancache_flush_page, + .flush_inode = zcache_cleancache_flush_inode, + .flush_fs = zcache_cleancache_flush_fs, + .init_shared_fs = zcache_cleancache_init_shared_fs, + .init_fs = zcache_cleancache_init_fs +}; + +struct cleancache_ops zcache_cleancache_register_ops(void) +{ + struct cleancache_ops old_ops = + cleancache_register_ops(&zcache_cleancache_ops); + + return old_ops; +} +#endif + +#ifdef CONFIG_FRONTSWAP +/* a single tmem poolid is used for all frontswap "types" (swapfiles) */ +static int zcache_frontswap_poolid = -1; + +/* + * Swizzling increases objects per swaptype, increasing tmem concurrency + * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS + */ +#define SWIZ_BITS 4 +#define SWIZ_MASK ((1 << SWIZ_BITS) - 1) +#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) +#define iswiz(_ind) (_ind >> SWIZ_BITS) + +static inline struct tmem_oid oswiz(unsigned type, u32 ind) +{ + struct tmem_oid oid = { .oid = { 0 } }; + oid.oid[0] = _oswiz(type, ind); + return oid; +} + +static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, + struct page *page) +{ + u64 ind64 = (u64)offset; + u32 ind = (u32)offset; + struct tmem_oid oid = oswiz(type, ind); + int ret = -1; + unsigned long flags; + + BUG_ON(!PageLocked(page)); + if (likely(ind64 == ind)) { + local_irq_save(flags); + ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid, + &oid, iswiz(ind), page); + local_irq_restore(flags); + } + return ret; +} + +/* returns 0 if the page was successfully gotten from frontswap, -1 if + * was not present (should never happen!) 
*/ +static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, + struct page *page) +{ + u64 ind64 = (u64)offset; + u32 ind = (u32)offset; + struct tmem_oid oid = oswiz(type, ind); + int ret = -1; + + BUG_ON(!PageLocked(page)); + if (likely(ind64 == ind)) + ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid, + &oid, iswiz(ind), page); + return ret; +} + +/* flush a single page from frontswap */ +static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset) +{ + u64 ind64 = (u64)offset; + u32 ind = (u32)offset; + struct tmem_oid oid = oswiz(type, ind); + + if (likely(ind64 == ind)) + (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid, + &oid, iswiz(ind)); +} + +/* flush all pages from the passed swaptype */ +static void zcache_frontswap_flush_area(unsigned type) +{ + struct tmem_oid oid; + int ind; + + for (ind = SWIZ_MASK; ind >= 0; ind--) { + oid = oswiz(type, ind); + (void)zcache_flush_object(LOCAL_CLIENT, + zcache_frontswap_poolid, &oid); + } +} + +static void zcache_frontswap_init(unsigned ignored) +{ + /* a single tmem poolid is used for all frontswap "types" (swapfiles) */ + if (zcache_frontswap_poolid < 0) + zcache_frontswap_poolid = + zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST); +} + +static struct frontswap_ops zcache_frontswap_ops = { + .put_page = zcache_frontswap_put_page, + .get_page = zcache_frontswap_get_page, + .flush_page = zcache_frontswap_flush_page, + .flush_area = zcache_frontswap_flush_area, + .init = zcache_frontswap_init +}; + +struct frontswap_ops zcache_frontswap_register_ops(void) +{ + struct frontswap_ops old_ops = + frontswap_register_ops(&zcache_frontswap_ops); + + return old_ops; +} +#endif + +/* + * zcache initialization + * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR + * NOTHING HAPPENS! 
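+ * (i.e. boot with "zcache" on the kernel command line; the __setup()
+ * handlers below also recognize "nocleancache" and "nofrontswap" to
+ * disable either layer independently.)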
+ */ + +static int zcache_enabled; + +static int __init enable_zcache(char *s) +{ + zcache_enabled = 1; + return 1; +} +__setup("zcache", enable_zcache); + +/* allow independent dynamic disabling of cleancache and frontswap */ + +static int use_cleancache = 1; + +static int __init no_cleancache(char *s) +{ + use_cleancache = 0; + return 1; +} + +__setup("nocleancache", no_cleancache); + +static int use_frontswap = 1; + +static int __init no_frontswap(char *s) +{ + use_frontswap = 0; + return 1; +} + +__setup("nofrontswap", no_frontswap); + +static int __init zcache_init(void) +{ +#ifdef CONFIG_SYSFS + int ret = 0; + + ret = sysfs_create_group(mm_kobj, &zcache_attr_group); + if (ret) { + pr_err("zcache: can't create sysfs\n"); + goto out; + } +#endif /* CONFIG_SYSFS */ +#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP) + if (zcache_enabled) { + unsigned int cpu; + + tmem_register_hostops(&zcache_hostops); + tmem_register_pamops(&zcache_pamops); + ret = register_cpu_notifier(&zcache_cpu_notifier_block); + if (ret) { + pr_err("zcache: can't register cpu notifier\n"); + goto out; + } + for_each_online_cpu(cpu) { + void *pcpu = (void *)(long)cpu; + zcache_cpu_notifier(&zcache_cpu_notifier_block, + CPU_UP_PREPARE, pcpu); + } + } + zcache_objnode_cache = kmem_cache_create("zcache_objnode", + sizeof(struct tmem_objnode), 0, 0, NULL); + zcache_obj_cache = kmem_cache_create("zcache_obj", + sizeof(struct tmem_obj), 0, 0, NULL); + ret = zcache_new_client(LOCAL_CLIENT); + if (ret) { + pr_err("zcache: can't create client\n"); + goto out; + } +#endif +#ifdef CONFIG_CLEANCACHE + if (zcache_enabled && use_cleancache) { + struct cleancache_ops old_ops; + + zbud_init(); + register_shrinker(&zcache_shrinker); + old_ops = zcache_cleancache_register_ops(); + pr_info("zcache: cleancache enabled using kernel " + "transcendent memory and compression buddies\n"); + if (old_ops.init_fs != NULL) + pr_warning("zcache: cleancache_ops overridden"); + } +#endif +#ifdef CONFIG_FRONTSWAP + if (zcache_enabled && use_frontswap) { + struct frontswap_ops old_ops; + + old_ops = zcache_frontswap_register_ops(); + pr_info("zcache: frontswap enabled using kernel " + "transcendent memory and xvmalloc\n"); + if (old_ops.init != NULL) + pr_warning("ktmem: frontswap_ops overridden"); + } +#endif +out: + return ret; +} + +module_init(zcache_init) diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache.c deleted file mode 100644 index 65a81a0..0000000 --- a/drivers/staging/zcache/zcache.c +++ /dev/null @@ -1,1996 +0,0 @@ -/* - * zcache.c - * - * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp. - * Copyright (c) 2010,2011, Nitin Gupta - * - * Zcache provides an in-kernel "host implementation" for transcendent memory - * and, thus indirectly, for cleancache and frontswap. Zcache includes two - * page-accessible memory [1] interfaces, both utilizing lzo1x compression: - * 1) "compression buddies" ("zbud") is used for ephemeral pages - * 2) xvmalloc is used for persistent pages. - * Xvmalloc (based on the TLSF allocator) has very low fragmentation - * so maximizes space efficiency, while zbud allows pairs (and potentially, - * in the future, more than a pair of) compressed pages to be closely linked - * so that reclaiming can be done via the kernel's physical-page-oriented - * "shrinker" interface. 
- * - * [1] For a definition of page-accessible memory (aka PAM), see: - * http://marc.info/?l=linux-mm&m=127811271605009 - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include "tmem.h" - -#include "../zram/xvmalloc.h" /* if built in drivers/staging */ - -#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP)) -#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP" -#endif -#ifdef CONFIG_CLEANCACHE -#include -#endif -#ifdef CONFIG_FRONTSWAP -#include -#endif - -#if 0 -/* this is more aggressive but may cause other problems? */ -#define ZCACHE_GFP_MASK (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN) -#else -#define ZCACHE_GFP_MASK \ - (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC) -#endif - -#define MAX_POOLS_PER_CLIENT 16 - -#define MAX_CLIENTS 16 -#define LOCAL_CLIENT ((uint16_t)-1) -struct zcache_client { - struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; - struct xv_pool *xvpool; - bool allocated; - atomic_t refcount; -}; - -static struct zcache_client zcache_host; -static struct zcache_client zcache_clients[MAX_CLIENTS]; - -static inline uint16_t get_client_id_from_client(struct zcache_client *cli) -{ - BUG_ON(cli == NULL); - if (cli == &zcache_host) - return LOCAL_CLIENT; - return cli - &zcache_clients[0]; -} - -static inline bool is_local_client(struct zcache_client *cli) -{ - return cli == &zcache_host; -} - -/********** - * Compression buddies ("zbud") provides for packing two (or, possibly - * in the future, more) compressed ephemeral pages into a single "raw" - * (physical) page and tracking them with data structures so that - * the raw pages can be easily reclaimed. - * - * A zbud page ("zbpg") is an aligned page containing a list_head, - * a lock, and two "zbud headers". The remainder of the physical - * page is divided up into aligned 64-byte "chunks" which contain - * the compressed data for zero, one, or two zbuds. Each zbpg - * resides on: (1) an "unused list" if it has no zbuds; (2) a - * "buddied" list if it is fully populated with two zbuds; or - * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks - * the one unbuddied zbud uses. The data inside a zbpg cannot be - * read or written unless the zbpg's lock is held. 
- */ - -#define ZBH_SENTINEL 0x43214321 -#define ZBPG_SENTINEL 0xdeadbeef - -#define ZBUD_MAX_BUDS 2 - -struct zbud_hdr { - uint16_t client_id; - uint16_t pool_id; - struct tmem_oid oid; - uint32_t index; - uint16_t size; /* compressed size in bytes, zero means unused */ - DECL_SENTINEL -}; - -struct zbud_page { - struct list_head bud_list; - spinlock_t lock; - struct zbud_hdr buddy[ZBUD_MAX_BUDS]; - DECL_SENTINEL - /* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */ -}; - -#define CHUNK_SHIFT 6 -#define CHUNK_SIZE (1 << CHUNK_SHIFT) -#define CHUNK_MASK (~(CHUNK_SIZE-1)) -#define NCHUNKS (((PAGE_SIZE - sizeof(struct zbud_page)) & \ - CHUNK_MASK) >> CHUNK_SHIFT) -#define MAX_CHUNK (NCHUNKS-1) - -static struct { - struct list_head list; - unsigned count; -} zbud_unbuddied[NCHUNKS]; -/* list N contains pages with N chunks USED and NCHUNKS-N unused */ -/* element 0 is never used but optimizing that isn't worth it */ -static unsigned long zbud_cumul_chunk_counts[NCHUNKS]; - -struct list_head zbud_buddied_list; -static unsigned long zcache_zbud_buddied_count; - -/* protects the buddied list and all unbuddied lists */ -static DEFINE_SPINLOCK(zbud_budlists_spinlock); - -static LIST_HEAD(zbpg_unused_list); -static unsigned long zcache_zbpg_unused_list_count; - -/* protects the unused page list */ -static DEFINE_SPINLOCK(zbpg_unused_list_spinlock); - -static atomic_t zcache_zbud_curr_raw_pages; -static atomic_t zcache_zbud_curr_zpages; -static unsigned long zcache_zbud_curr_zbytes; -static unsigned long zcache_zbud_cumul_zpages; -static unsigned long zcache_zbud_cumul_zbytes; -static unsigned long zcache_compress_poor; -static unsigned long zcache_mean_compress_poor; - -/* forward references */ -static void *zcache_get_free_page(void); -static void zcache_free_page(void *p); - -/* - * zbud helper functions - */ - -static inline unsigned zbud_max_buddy_size(void) -{ - return MAX_CHUNK << CHUNK_SHIFT; -} - -static inline unsigned zbud_size_to_chunks(unsigned size) -{ - BUG_ON(size == 0 || size > zbud_max_buddy_size()); - return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT; -} - -static inline int zbud_budnum(struct zbud_hdr *zh) -{ - unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1); - struct zbud_page *zbpg = NULL; - unsigned budnum = -1U; - int i; - - for (i = 0; i < ZBUD_MAX_BUDS; i++) - if (offset == offsetof(typeof(*zbpg), buddy[i])) { - budnum = i; - break; - } - BUG_ON(budnum == -1U); - return budnum; -} - -static char *zbud_data(struct zbud_hdr *zh, unsigned size) -{ - struct zbud_page *zbpg; - char *p; - unsigned budnum; - - ASSERT_SENTINEL(zh, ZBH); - budnum = zbud_budnum(zh); - BUG_ON(size == 0 || size > zbud_max_buddy_size()); - zbpg = container_of(zh, struct zbud_page, buddy[budnum]); - ASSERT_SPINLOCK(&zbpg->lock); - p = (char *)zbpg; - if (budnum == 0) - p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) & - CHUNK_MASK); - else if (budnum == 1) - p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK); - return p; -} - -/* - * zbud raw page management - */ - -static struct zbud_page *zbud_alloc_raw_page(void) -{ - struct zbud_page *zbpg = NULL; - struct zbud_hdr *zh0, *zh1; - bool recycled = 0; - - /* if any pages on the zbpg list, use one */ - spin_lock(&zbpg_unused_list_spinlock); - if (!list_empty(&zbpg_unused_list)) { - zbpg = list_first_entry(&zbpg_unused_list, - struct zbud_page, bud_list); - list_del_init(&zbpg->bud_list); - zcache_zbpg_unused_list_count--; - recycled = 1; - } - spin_unlock(&zbpg_unused_list_spinlock); - if (zbpg == NULL) - /* none on zbpg list, try to get a 
kernel page */ - zbpg = zcache_get_free_page(); - if (likely(zbpg != NULL)) { - INIT_LIST_HEAD(&zbpg->bud_list); - zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1]; - spin_lock_init(&zbpg->lock); - if (recycled) { - ASSERT_INVERTED_SENTINEL(zbpg, ZBPG); - SET_SENTINEL(zbpg, ZBPG); - BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid)); - BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid)); - } else { - atomic_inc(&zcache_zbud_curr_raw_pages); - INIT_LIST_HEAD(&zbpg->bud_list); - SET_SENTINEL(zbpg, ZBPG); - zh0->size = 0; zh1->size = 0; - tmem_oid_set_invalid(&zh0->oid); - tmem_oid_set_invalid(&zh1->oid); - } - } - return zbpg; -} - -static void zbud_free_raw_page(struct zbud_page *zbpg) -{ - struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1]; - - ASSERT_SENTINEL(zbpg, ZBPG); - BUG_ON(!list_empty(&zbpg->bud_list)); - ASSERT_SPINLOCK(&zbpg->lock); - BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid)); - BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid)); - INVERT_SENTINEL(zbpg, ZBPG); - spin_unlock(&zbpg->lock); - spin_lock(&zbpg_unused_list_spinlock); - list_add(&zbpg->bud_list, &zbpg_unused_list); - zcache_zbpg_unused_list_count++; - spin_unlock(&zbpg_unused_list_spinlock); -} - -/* - * core zbud handling routines - */ - -static unsigned zbud_free(struct zbud_hdr *zh) -{ - unsigned size; - - ASSERT_SENTINEL(zh, ZBH); - BUG_ON(!tmem_oid_valid(&zh->oid)); - size = zh->size; - BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); - zh->size = 0; - tmem_oid_set_invalid(&zh->oid); - INVERT_SENTINEL(zh, ZBH); - zcache_zbud_curr_zbytes -= size; - atomic_dec(&zcache_zbud_curr_zpages); - return size; -} - -static void zbud_free_and_delist(struct zbud_hdr *zh) -{ - unsigned chunks; - struct zbud_hdr *zh_other; - unsigned budnum = zbud_budnum(zh), size; - struct zbud_page *zbpg = - container_of(zh, struct zbud_page, buddy[budnum]); - - spin_lock(&zbpg->lock); - if (list_empty(&zbpg->bud_list)) { - /* ignore zombie page... see zbud_evict_pages() */ - spin_unlock(&zbpg->lock); - return; - } - size = zbud_free(zh); - ASSERT_SPINLOCK(&zbpg->lock); - zh_other = &zbpg->buddy[(budnum == 0) ? 
1 : 0]; - if (zh_other->size == 0) { /* was unbuddied: unlist and free */ - chunks = zbud_size_to_chunks(size) ; - spin_lock(&zbud_budlists_spinlock); - BUG_ON(list_empty(&zbud_unbuddied[chunks].list)); - list_del_init(&zbpg->bud_list); - zbud_unbuddied[chunks].count--; - spin_unlock(&zbud_budlists_spinlock); - zbud_free_raw_page(zbpg); - } else { /* was buddied: move remaining buddy to unbuddied list */ - chunks = zbud_size_to_chunks(zh_other->size) ; - spin_lock(&zbud_budlists_spinlock); - list_del_init(&zbpg->bud_list); - zcache_zbud_buddied_count--; - list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list); - zbud_unbuddied[chunks].count++; - spin_unlock(&zbud_budlists_spinlock); - spin_unlock(&zbpg->lock); - } -} - -static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id, - struct tmem_oid *oid, - uint32_t index, struct page *page, - void *cdata, unsigned size) -{ - struct zbud_hdr *zh0, *zh1, *zh = NULL; - struct zbud_page *zbpg = NULL, *ztmp; - unsigned nchunks; - char *to; - int i, found_good_buddy = 0; - - nchunks = zbud_size_to_chunks(size) ; - for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) { - spin_lock(&zbud_budlists_spinlock); - if (!list_empty(&zbud_unbuddied[i].list)) { - list_for_each_entry_safe(zbpg, ztmp, - &zbud_unbuddied[i].list, bud_list) { - if (spin_trylock(&zbpg->lock)) { - found_good_buddy = i; - goto found_unbuddied; - } - } - } - spin_unlock(&zbud_budlists_spinlock); - } - /* didn't find a good buddy, try allocating a new page */ - zbpg = zbud_alloc_raw_page(); - if (unlikely(zbpg == NULL)) - goto out; - /* ok, have a page, now compress the data before taking locks */ - spin_lock(&zbpg->lock); - spin_lock(&zbud_budlists_spinlock); - list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list); - zbud_unbuddied[nchunks].count++; - zh = &zbpg->buddy[0]; - goto init_zh; - -found_unbuddied: - ASSERT_SPINLOCK(&zbpg->lock); - zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1]; - BUG_ON(!((zh0->size == 0) ^ (zh1->size == 0))); - if (zh0->size != 0) { /* buddy0 in use, buddy1 is vacant */ - ASSERT_SENTINEL(zh0, ZBH); - zh = zh1; - } else if (zh1->size != 0) { /* buddy1 in use, buddy0 is vacant */ - ASSERT_SENTINEL(zh1, ZBH); - zh = zh0; - } else - BUG(); - list_del_init(&zbpg->bud_list); - zbud_unbuddied[found_good_buddy].count--; - list_add_tail(&zbpg->bud_list, &zbud_buddied_list); - zcache_zbud_buddied_count++; - -init_zh: - SET_SENTINEL(zh, ZBH); - zh->size = size; - zh->index = index; - zh->oid = *oid; - zh->pool_id = pool_id; - zh->client_id = client_id; - /* can wait to copy the data until the list locks are dropped */ - spin_unlock(&zbud_budlists_spinlock); - - to = zbud_data(zh, size); - memcpy(to, cdata, size); - spin_unlock(&zbpg->lock); - zbud_cumul_chunk_counts[nchunks]++; - atomic_inc(&zcache_zbud_curr_zpages); - zcache_zbud_cumul_zpages++; - zcache_zbud_curr_zbytes += size; - zcache_zbud_cumul_zbytes += size; -out: - return zh; -} - -static int zbud_decompress(struct page *page, struct zbud_hdr *zh) -{ - struct zbud_page *zbpg; - unsigned budnum = zbud_budnum(zh); - size_t out_len = PAGE_SIZE; - char *to_va, *from_va; - unsigned size; - int ret = 0; - - zbpg = container_of(zh, struct zbud_page, buddy[budnum]); - spin_lock(&zbpg->lock); - if (list_empty(&zbpg->bud_list)) { - /* ignore zombie page... 
see zbud_evict_pages() */ - ret = -EINVAL; - goto out; - } - ASSERT_SENTINEL(zh, ZBH); - BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); - to_va = kmap_atomic(page, KM_USER0); - size = zh->size; - from_va = zbud_data(zh, size); - ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len); - BUG_ON(ret != LZO_E_OK); - BUG_ON(out_len != PAGE_SIZE); - kunmap_atomic(to_va, KM_USER0); -out: - spin_unlock(&zbpg->lock); - return ret; -} - -/* - * The following routines handle shrinking of ephemeral pages by evicting - * pages "least valuable" first. - */ - -static unsigned long zcache_evicted_raw_pages; -static unsigned long zcache_evicted_buddied_pages; -static unsigned long zcache_evicted_unbuddied_pages; - -static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, - uint16_t poolid); -static void zcache_put_pool(struct tmem_pool *pool); - -/* - * Flush and free all zbuds in a zbpg, then free the pageframe - */ -static void zbud_evict_zbpg(struct zbud_page *zbpg) -{ - struct zbud_hdr *zh; - int i, j; - uint32_t pool_id[ZBUD_MAX_BUDS], client_id[ZBUD_MAX_BUDS]; - uint32_t index[ZBUD_MAX_BUDS]; - struct tmem_oid oid[ZBUD_MAX_BUDS]; - struct tmem_pool *pool; - - ASSERT_SPINLOCK(&zbpg->lock); - BUG_ON(!list_empty(&zbpg->bud_list)); - for (i = 0, j = 0; i < ZBUD_MAX_BUDS; i++) { - zh = &zbpg->buddy[i]; - if (zh->size) { - client_id[j] = zh->client_id; - pool_id[j] = zh->pool_id; - oid[j] = zh->oid; - index[j] = zh->index; - j++; - zbud_free(zh); - } - } - spin_unlock(&zbpg->lock); - for (i = 0; i < j; i++) { - pool = zcache_get_pool_by_id(client_id[i], pool_id[i]); - if (pool != NULL) { - tmem_flush_page(pool, &oid[i], index[i]); - zcache_put_pool(pool); - } - } - ASSERT_SENTINEL(zbpg, ZBPG); - spin_lock(&zbpg->lock); - zbud_free_raw_page(zbpg); -} - -/* - * Free nr pages. This code is funky because we want to hold the locks - * protecting various lists for as short a time as possible, and in some - * circumstances the list may change asynchronously when the list lock is - * not held. In some cases we also trylock not only to avoid waiting on a - * page in use by another cpu, but also to avoid potential deadlock due to - * lock inversion. 
- */ -static void zbud_evict_pages(int nr) -{ - struct zbud_page *zbpg; - int i; - - /* first try freeing any pages on unused list */ -retry_unused_list: - spin_lock_bh(&zbpg_unused_list_spinlock); - if (!list_empty(&zbpg_unused_list)) { - /* can't walk list here, since it may change when unlocked */ - zbpg = list_first_entry(&zbpg_unused_list, - struct zbud_page, bud_list); - list_del_init(&zbpg->bud_list); - zcache_zbpg_unused_list_count--; - atomic_dec(&zcache_zbud_curr_raw_pages); - spin_unlock_bh(&zbpg_unused_list_spinlock); - zcache_free_page(zbpg); - zcache_evicted_raw_pages++; - if (--nr <= 0) - goto out; - goto retry_unused_list; - } - spin_unlock_bh(&zbpg_unused_list_spinlock); - - /* now try freeing unbuddied pages, starting with least space avail */ - for (i = 0; i < MAX_CHUNK; i++) { -retry_unbud_list_i: - spin_lock_bh(&zbud_budlists_spinlock); - if (list_empty(&zbud_unbuddied[i].list)) { - spin_unlock_bh(&zbud_budlists_spinlock); - continue; - } - list_for_each_entry(zbpg, &zbud_unbuddied[i].list, bud_list) { - if (unlikely(!spin_trylock(&zbpg->lock))) - continue; - list_del_init(&zbpg->bud_list); - zbud_unbuddied[i].count--; - spin_unlock(&zbud_budlists_spinlock); - zcache_evicted_unbuddied_pages++; - /* want budlists unlocked when doing zbpg eviction */ - zbud_evict_zbpg(zbpg); - local_bh_enable(); - if (--nr <= 0) - goto out; - goto retry_unbud_list_i; - } - spin_unlock_bh(&zbud_budlists_spinlock); - } - - /* as a last resort, free buddied pages */ -retry_bud_list: - spin_lock_bh(&zbud_budlists_spinlock); - if (list_empty(&zbud_buddied_list)) { - spin_unlock_bh(&zbud_budlists_spinlock); - goto out; - } - list_for_each_entry(zbpg, &zbud_buddied_list, bud_list) { - if (unlikely(!spin_trylock(&zbpg->lock))) - continue; - list_del_init(&zbpg->bud_list); - zcache_zbud_buddied_count--; - spin_unlock(&zbud_budlists_spinlock); - zcache_evicted_buddied_pages++; - /* want budlists unlocked when doing zbpg eviction */ - zbud_evict_zbpg(zbpg); - local_bh_enable(); - if (--nr <= 0) - goto out; - goto retry_bud_list; - } - spin_unlock_bh(&zbud_budlists_spinlock); -out: - return; -} - -static void zbud_init(void) -{ - int i; - - INIT_LIST_HEAD(&zbud_buddied_list); - zcache_zbud_buddied_count = 0; - for (i = 0; i < NCHUNKS; i++) { - INIT_LIST_HEAD(&zbud_unbuddied[i].list); - zbud_unbuddied[i].count = 0; - } -} - -#ifdef CONFIG_SYSFS -/* - * These sysfs routines show a nice distribution of how many zbpg's are - * currently (and have ever been placed) in each unbuddied list. It's fun - * to watch but can probably go away before final merge. 
- */ -static int zbud_show_unbuddied_list_counts(char *buf) -{ - int i; - char *p = buf; - - for (i = 0; i < NCHUNKS; i++) - p += sprintf(p, "%u ", zbud_unbuddied[i].count); - return p - buf; -} - -static int zbud_show_cumul_chunk_counts(char *buf) -{ - unsigned long i, chunks = 0, total_chunks = 0, sum_total_chunks = 0; - unsigned long total_chunks_lte_21 = 0, total_chunks_lte_32 = 0; - unsigned long total_chunks_lte_42 = 0; - char *p = buf; - - for (i = 0; i < NCHUNKS; i++) { - p += sprintf(p, "%lu ", zbud_cumul_chunk_counts[i]); - chunks += zbud_cumul_chunk_counts[i]; - total_chunks += zbud_cumul_chunk_counts[i]; - sum_total_chunks += i * zbud_cumul_chunk_counts[i]; - if (i == 21) - total_chunks_lte_21 = total_chunks; - if (i == 32) - total_chunks_lte_32 = total_chunks; - if (i == 42) - total_chunks_lte_42 = total_chunks; - } - p += sprintf(p, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n", - total_chunks_lte_21, total_chunks_lte_32, total_chunks_lte_42, - chunks == 0 ? 0 : sum_total_chunks / chunks); - return p - buf; -} -#endif - -/********** - * This "zv" PAM implementation combines the TLSF-based xvMalloc - * with lzo1x compression to maximize the amount of data that can - * be packed into a physical page. - * - * Zv represents a PAM page with the index and object (plus a "size" value - * necessary for decompression) immediately preceding the compressed data. - */ - -#define ZVH_SENTINEL 0x43214321 - -struct zv_hdr { - uint32_t pool_id; - struct tmem_oid oid; - uint32_t index; - DECL_SENTINEL -}; - -/* rudimentary policy limits */ -/* total number of persistent pages may not exceed this percentage */ -static unsigned int zv_page_count_policy_percent = 75; -/* - * byte count defining poor compression; pages with greater zsize will be - * rejected - */ -static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7; -/* - * byte count defining poor *mean* compression; pages with greater zsize - * will be rejected until sufficient better-compressed pages are accepted - * driving the man below this threshold - */ -static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5; - -static unsigned long zv_curr_dist_counts[NCHUNKS]; -static unsigned long zv_cumul_dist_counts[NCHUNKS]; - -static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id, - struct tmem_oid *oid, uint32_t index, - void *cdata, unsigned clen) -{ - struct page *page; - struct zv_hdr *zv = NULL; - uint32_t offset; - int alloc_size = clen + sizeof(struct zv_hdr); - int chunks = (alloc_size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT; - int ret; - - BUG_ON(!irqs_disabled()); - BUG_ON(chunks >= NCHUNKS); - ret = xv_malloc(xvpool, alloc_size, - &page, &offset, ZCACHE_GFP_MASK); - if (unlikely(ret)) - goto out; - zv_curr_dist_counts[chunks]++; - zv_cumul_dist_counts[chunks]++; - zv = kmap_atomic(page, KM_USER0) + offset; - zv->index = index; - zv->oid = *oid; - zv->pool_id = pool_id; - SET_SENTINEL(zv, ZVH); - memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen); - kunmap_atomic(zv, KM_USER0); -out: - return zv; -} - -static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv) -{ - unsigned long flags; - struct page *page; - uint32_t offset; - uint16_t size = xv_get_object_size(zv); - int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT; - - ASSERT_SENTINEL(zv, ZVH); - BUG_ON(chunks >= NCHUNKS); - zv_curr_dist_counts[chunks]--; - size -= sizeof(*zv); - BUG_ON(size == 0); - INVERT_SENTINEL(zv, ZVH); - page = virt_to_page(zv); - offset = (unsigned long)zv & ~PAGE_MASK; - local_irq_save(flags); - xv_free(xvpool, page, offset); - 
local_irq_restore(flags); -} - -static void zv_decompress(struct page *page, struct zv_hdr *zv) -{ - size_t clen = PAGE_SIZE; - char *to_va; - unsigned size; - int ret; - - ASSERT_SENTINEL(zv, ZVH); - size = xv_get_object_size(zv) - sizeof(*zv); - BUG_ON(size == 0); - to_va = kmap_atomic(page, KM_USER0); - ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv), - size, to_va, &clen); - kunmap_atomic(to_va, KM_USER0); - BUG_ON(ret != LZO_E_OK); - BUG_ON(clen != PAGE_SIZE); -} - -#ifdef CONFIG_SYSFS -/* - * show a distribution of compression stats for zv pages. - */ - -static int zv_curr_dist_counts_show(char *buf) -{ - unsigned long i, n, chunks = 0, sum_total_chunks = 0; - char *p = buf; - - for (i = 0; i < NCHUNKS; i++) { - n = zv_curr_dist_counts[i]; - p += sprintf(p, "%lu ", n); - chunks += n; - sum_total_chunks += i * n; - } - p += sprintf(p, "mean:%lu\n", - chunks == 0 ? 0 : sum_total_chunks / chunks); - return p - buf; -} - -static int zv_cumul_dist_counts_show(char *buf) -{ - unsigned long i, n, chunks = 0, sum_total_chunks = 0; - char *p = buf; - - for (i = 0; i < NCHUNKS; i++) { - n = zv_cumul_dist_counts[i]; - p += sprintf(p, "%lu ", n); - chunks += n; - sum_total_chunks += i * n; - } - p += sprintf(p, "mean:%lu\n", - chunks == 0 ? 0 : sum_total_chunks / chunks); - return p - buf; -} - -/* - * setting zv_max_zsize via sysfs causes all persistent (e.g. swap) - * pages that don't compress to less than this value (including metadata - * overhead) to be rejected. We don't allow the value to get too close - * to PAGE_SIZE. - */ -static ssize_t zv_max_zsize_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", zv_max_zsize); -} - -static ssize_t zv_max_zsize_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - unsigned long val; - int err; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - err = strict_strtoul(buf, 10, &val); - if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7)) - return -EINVAL; - zv_max_zsize = val; - return count; -} - -/* - * setting zv_max_mean_zsize via sysfs causes all persistent (e.g. swap) - * pages that don't compress to less than this value (including metadata - * overhead) to be rejected UNLESS the mean compression is also smaller - * than this value. In other words, we are load-balancing-by-zsize the - * accepted pages. Again, we don't allow the value to get too close - * to PAGE_SIZE. - */ -static ssize_t zv_max_mean_zsize_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", zv_max_mean_zsize); -} - -static ssize_t zv_max_mean_zsize_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - unsigned long val; - int err; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - err = strict_strtoul(buf, 10, &val); - if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7)) - return -EINVAL; - zv_max_mean_zsize = val; - return count; -} - -/* - * setting zv_page_count_policy_percent via sysfs sets an upper bound of - * persistent (e.g. swap) pages that will be retained according to: - * (zv_page_count_policy_percent * totalram_pages) / 100) - * when that limit is reached, further puts will be rejected (until - * some pages have been flushed). Note that, due to compression, - * this number may exceed 100; it defaults to 75 and we set an - * arbitary limit of 150. A poor choice will almost certainly result - * in OOM's, so this value should only be changed prudently. 
- */ -static ssize_t zv_page_count_policy_percent_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", zv_page_count_policy_percent); -} - -static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - unsigned long val; - int err; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - err = strict_strtoul(buf, 10, &val); - if (err || (val == 0) || (val > 150)) - return -EINVAL; - zv_page_count_policy_percent = val; - return count; -} - -static struct kobj_attribute zcache_zv_max_zsize_attr = { - .attr = { .name = "zv_max_zsize", .mode = 0644 }, - .show = zv_max_zsize_show, - .store = zv_max_zsize_store, -}; - -static struct kobj_attribute zcache_zv_max_mean_zsize_attr = { - .attr = { .name = "zv_max_mean_zsize", .mode = 0644 }, - .show = zv_max_mean_zsize_show, - .store = zv_max_mean_zsize_store, -}; - -static struct kobj_attribute zcache_zv_page_count_policy_percent_attr = { - .attr = { .name = "zv_page_count_policy_percent", - .mode = 0644 }, - .show = zv_page_count_policy_percent_show, - .store = zv_page_count_policy_percent_store, -}; -#endif - -/* - * zcache core code starts here - */ - -/* useful stats not collected by cleancache or frontswap */ -static unsigned long zcache_flush_total; -static unsigned long zcache_flush_found; -static unsigned long zcache_flobj_total; -static unsigned long zcache_flobj_found; -static unsigned long zcache_failed_eph_puts; -static unsigned long zcache_failed_pers_puts; - -/* - * Tmem operations assume the poolid implies the invoking client. - * Zcache only has one client (the kernel itself): LOCAL_CLIENT. - * RAMster has each client numbered by cluster node, and a KVM version - * of zcache would have one client per guest and each client might - * have a poolid==N. - */ -static struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid) -{ - struct tmem_pool *pool = NULL; - struct zcache_client *cli = NULL; - - if (cli_id == LOCAL_CLIENT) - cli = &zcache_host; - else { - if (cli_id >= MAX_CLIENTS) - goto out; - cli = &zcache_clients[cli_id]; - if (cli == NULL) - goto out; - atomic_inc(&cli->refcount); - } - if (poolid < MAX_POOLS_PER_CLIENT) { - pool = cli->tmem_pools[poolid]; - if (pool != NULL) - atomic_inc(&pool->refcount); - } -out: - return pool; -} - -static void zcache_put_pool(struct tmem_pool *pool) -{ - struct zcache_client *cli = NULL; - - if (pool == NULL) - BUG(); - cli = pool->client; - atomic_dec(&pool->refcount); - atomic_dec(&cli->refcount); -} - -int zcache_new_client(uint16_t cli_id) -{ - struct zcache_client *cli = NULL; - int ret = -1; - - if (cli_id == LOCAL_CLIENT) - cli = &zcache_host; - else if ((unsigned int)cli_id < MAX_CLIENTS) - cli = &zcache_clients[cli_id]; - if (cli == NULL) - goto out; - if (cli->allocated) - goto out; - cli->allocated = 1; -#ifdef CONFIG_FRONTSWAP - cli->xvpool = xv_create_pool(); - if (cli->xvpool == NULL) - goto out; -#endif - ret = 0; -out: - return ret; -} - -/* counters for debugging */ -static unsigned long zcache_failed_get_free_pages; -static unsigned long zcache_failed_alloc; -static unsigned long zcache_put_to_flush; -static unsigned long zcache_aborted_preload; -static unsigned long zcache_aborted_shrink; - -/* - * Ensure that memory allocation requests in zcache don't result - * in direct reclaim requests via the shrinker, which would cause - * an infinite loop. Maybe a GFP flag would be better? 
- */ -static DEFINE_SPINLOCK(zcache_direct_reclaim_lock); - -/* - * for now, used named slabs so can easily track usage; later can - * either just use kmalloc, or perhaps add a slab-like allocator - * to more carefully manage total memory utilization - */ -static struct kmem_cache *zcache_objnode_cache; -static struct kmem_cache *zcache_obj_cache; -static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0); -static unsigned long zcache_curr_obj_count_max; -static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0); -static unsigned long zcache_curr_objnode_count_max; - -/* - * to avoid memory allocation recursion (e.g. due to direct reclaim), we - * preload all necessary data structures so the hostops callbacks never - * actually do a malloc - */ -struct zcache_preload { - void *page; - struct tmem_obj *obj; - int nr; - struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH]; -}; -static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, }; - -static int zcache_do_preload(struct tmem_pool *pool) -{ - struct zcache_preload *kp; - struct tmem_objnode *objnode; - struct tmem_obj *obj; - void *page; - int ret = -ENOMEM; - - if (unlikely(zcache_objnode_cache == NULL)) - goto out; - if (unlikely(zcache_obj_cache == NULL)) - goto out; - if (!spin_trylock(&zcache_direct_reclaim_lock)) { - zcache_aborted_preload++; - goto out; - } - preempt_disable(); - kp = &__get_cpu_var(zcache_preloads); - while (kp->nr < ARRAY_SIZE(kp->objnodes)) { - preempt_enable_no_resched(); - objnode = kmem_cache_alloc(zcache_objnode_cache, - ZCACHE_GFP_MASK); - if (unlikely(objnode == NULL)) { - zcache_failed_alloc++; - goto unlock_out; - } - preempt_disable(); - kp = &__get_cpu_var(zcache_preloads); - if (kp->nr < ARRAY_SIZE(kp->objnodes)) - kp->objnodes[kp->nr++] = objnode; - else - kmem_cache_free(zcache_objnode_cache, objnode); - } - preempt_enable_no_resched(); - obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK); - if (unlikely(obj == NULL)) { - zcache_failed_alloc++; - goto unlock_out; - } - page = (void *)__get_free_page(ZCACHE_GFP_MASK); - if (unlikely(page == NULL)) { - zcache_failed_get_free_pages++; - kmem_cache_free(zcache_obj_cache, obj); - goto unlock_out; - } - preempt_disable(); - kp = &__get_cpu_var(zcache_preloads); - if (kp->obj == NULL) - kp->obj = obj; - else - kmem_cache_free(zcache_obj_cache, obj); - if (kp->page == NULL) - kp->page = page; - else - free_page((unsigned long)page); - ret = 0; -unlock_out: - spin_unlock(&zcache_direct_reclaim_lock); -out: - return ret; -} - -static void *zcache_get_free_page(void) -{ - struct zcache_preload *kp; - void *page; - - kp = &__get_cpu_var(zcache_preloads); - page = kp->page; - BUG_ON(page == NULL); - kp->page = NULL; - return page; -} - -static void zcache_free_page(void *p) -{ - free_page((unsigned long)p); -} - -/* - * zcache implementation for tmem host ops - */ - -static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool) -{ - struct tmem_objnode *objnode = NULL; - unsigned long count; - struct zcache_preload *kp; - - kp = &__get_cpu_var(zcache_preloads); - if (kp->nr <= 0) - goto out; - objnode = kp->objnodes[kp->nr - 1]; - BUG_ON(objnode == NULL); - kp->objnodes[kp->nr - 1] = NULL; - kp->nr--; - count = atomic_inc_return(&zcache_curr_objnode_count); - if (count > zcache_curr_objnode_count_max) - zcache_curr_objnode_count_max = count; -out: - return objnode; -} - -static void zcache_objnode_free(struct tmem_objnode *objnode, - struct tmem_pool *pool) -{ - atomic_dec(&zcache_curr_objnode_count); - 
BUG_ON(atomic_read(&zcache_curr_objnode_count) < 0); - kmem_cache_free(zcache_objnode_cache, objnode); -} - -static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool) -{ - struct tmem_obj *obj = NULL; - unsigned long count; - struct zcache_preload *kp; - - kp = &__get_cpu_var(zcache_preloads); - obj = kp->obj; - BUG_ON(obj == NULL); - kp->obj = NULL; - count = atomic_inc_return(&zcache_curr_obj_count); - if (count > zcache_curr_obj_count_max) - zcache_curr_obj_count_max = count; - return obj; -} - -static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool) -{ - atomic_dec(&zcache_curr_obj_count); - BUG_ON(atomic_read(&zcache_curr_obj_count) < 0); - kmem_cache_free(zcache_obj_cache, obj); -} - -static struct tmem_hostops zcache_hostops = { - .obj_alloc = zcache_obj_alloc, - .obj_free = zcache_obj_free, - .objnode_alloc = zcache_objnode_alloc, - .objnode_free = zcache_objnode_free, -}; - -/* - * zcache implementations for PAM page descriptor ops - */ - -static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0); -static unsigned long zcache_curr_eph_pampd_count_max; -static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0); -static unsigned long zcache_curr_pers_pampd_count_max; - -/* forward reference */ -static int zcache_compress(struct page *from, void **out_va, size_t *out_len); - -static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, - struct tmem_pool *pool, struct tmem_oid *oid, - uint32_t index) -{ - void *pampd = NULL, *cdata; - size_t clen; - int ret; - unsigned long count; - struct page *page = virt_to_page(data); - struct zcache_client *cli = pool->client; - uint16_t client_id = get_client_id_from_client(cli); - unsigned long zv_mean_zsize; - unsigned long curr_pers_pampd_count; - - if (eph) { - ret = zcache_compress(page, &cdata, &clen); - if (ret == 0) - goto out; - if (clen == 0 || clen > zbud_max_buddy_size()) { - zcache_compress_poor++; - goto out; - } - pampd = (void *)zbud_create(client_id, pool->pool_id, oid, - index, page, cdata, clen); - if (pampd != NULL) { - count = atomic_inc_return(&zcache_curr_eph_pampd_count); - if (count > zcache_curr_eph_pampd_count_max) - zcache_curr_eph_pampd_count_max = count; - } - } else { - curr_pers_pampd_count = - atomic_read(&zcache_curr_pers_pampd_count); - if (curr_pers_pampd_count > - (zv_page_count_policy_percent * totalram_pages) / 100) - goto out; - ret = zcache_compress(page, &cdata, &clen); - if (ret == 0) - goto out; - /* reject if compression is too poor */ - if (clen > zv_max_zsize) { - zcache_compress_poor++; - goto out; - } - /* reject if mean compression is too poor */ - if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { - zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) / - curr_pers_pampd_count; - if (zv_mean_zsize > zv_max_mean_zsize) { - zcache_mean_compress_poor++; - goto out; - } - } - pampd = (void *)zv_create(cli->xvpool, pool->pool_id, - oid, index, cdata, clen); - if (pampd == NULL) - goto out; - count = atomic_inc_return(&zcache_curr_pers_pampd_count); - if (count > zcache_curr_pers_pampd_count_max) - zcache_curr_pers_pampd_count_max = count; - } -out: - return pampd; -} - -/* - * fill the pageframe corresponding to the struct page with the data - * from the passed pampd - */ -static int zcache_pampd_get_data(char *data, size_t *bufsize, bool raw, - void *pampd, struct tmem_pool *pool, - struct tmem_oid *oid, uint32_t index) -{ - int ret = 0; - - BUG_ON(is_ephemeral(pool)); - zv_decompress(virt_to_page(data), pampd); - return ret; -} - -/* - 
* fill the pageframe corresponding to the struct page with the data - * from the passed pampd - */ -static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw, - void *pampd, struct tmem_pool *pool, - struct tmem_oid *oid, uint32_t index) -{ - int ret = 0; - - BUG_ON(!is_ephemeral(pool)); - zbud_decompress(virt_to_page(data), pampd); - zbud_free_and_delist((struct zbud_hdr *)pampd); - atomic_dec(&zcache_curr_eph_pampd_count); - return ret; -} - -/* - * free the pampd and remove it from any zcache lists - * pampd must no longer be pointed to from any tmem data structures! - */ -static void zcache_pampd_free(void *pampd, struct tmem_pool *pool, - struct tmem_oid *oid, uint32_t index) -{ - struct zcache_client *cli = pool->client; - - if (is_ephemeral(pool)) { - zbud_free_and_delist((struct zbud_hdr *)pampd); - atomic_dec(&zcache_curr_eph_pampd_count); - BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0); - } else { - zv_free(cli->xvpool, (struct zv_hdr *)pampd); - atomic_dec(&zcache_curr_pers_pampd_count); - BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0); - } -} - -static void zcache_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj) -{ -} - -static void zcache_pampd_new_obj(struct tmem_obj *obj) -{ -} - -static int zcache_pampd_replace_in_obj(void *pampd, struct tmem_obj *obj) -{ - return -1; -} - -static bool zcache_pampd_is_remote(void *pampd) -{ - return 0; -} - -static struct tmem_pamops zcache_pamops = { - .create = zcache_pampd_create, - .get_data = zcache_pampd_get_data, - .get_data_and_free = zcache_pampd_get_data_and_free, - .free = zcache_pampd_free, - .free_obj = zcache_pampd_free_obj, - .new_obj = zcache_pampd_new_obj, - .replace_in_obj = zcache_pampd_replace_in_obj, - .is_remote = zcache_pampd_is_remote, -}; - -/* - * zcache compression/decompression and related per-cpu stuff - */ - -#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS -#define LZO_DSTMEM_PAGE_ORDER 1 -static DEFINE_PER_CPU(unsigned char *, zcache_workmem); -static DEFINE_PER_CPU(unsigned char *, zcache_dstmem); - -static int zcache_compress(struct page *from, void **out_va, size_t *out_len) -{ - int ret = 0; - unsigned char *dmem = __get_cpu_var(zcache_dstmem); - unsigned char *wmem = __get_cpu_var(zcache_workmem); - char *from_va; - - BUG_ON(!irqs_disabled()); - if (unlikely(dmem == NULL || wmem == NULL)) - goto out; /* no buffer, so can't compress */ - from_va = kmap_atomic(from, KM_USER0); - mb(); - ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem); - BUG_ON(ret != LZO_E_OK); - *out_va = dmem; - kunmap_atomic(from_va, KM_USER0); - ret = 1; -out: - return ret; -} - - -static int zcache_cpu_notifier(struct notifier_block *nb, - unsigned long action, void *pcpu) -{ - int cpu = (long)pcpu; - struct zcache_preload *kp; - - switch (action) { - case CPU_UP_PREPARE: - per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages( - GFP_KERNEL | __GFP_REPEAT, - LZO_DSTMEM_PAGE_ORDER), - per_cpu(zcache_workmem, cpu) = - kzalloc(LZO1X_MEM_COMPRESS, - GFP_KERNEL | __GFP_REPEAT); - break; - case CPU_DEAD: - case CPU_UP_CANCELED: - free_pages((unsigned long)per_cpu(zcache_dstmem, cpu), - LZO_DSTMEM_PAGE_ORDER); - per_cpu(zcache_dstmem, cpu) = NULL; - kfree(per_cpu(zcache_workmem, cpu)); - per_cpu(zcache_workmem, cpu) = NULL; - kp = &per_cpu(zcache_preloads, cpu); - while (kp->nr) { - kmem_cache_free(zcache_objnode_cache, - kp->objnodes[kp->nr - 1]); - kp->objnodes[kp->nr - 1] = NULL; - kp->nr--; - } - kmem_cache_free(zcache_obj_cache, kp->obj); - free_page((unsigned 
long)kp->page); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block zcache_cpu_notifier_block = { - .notifier_call = zcache_cpu_notifier -}; - -#ifdef CONFIG_SYSFS -#define ZCACHE_SYSFS_RO(_name) \ - static ssize_t zcache_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf) \ - { \ - return sprintf(buf, "%lu\n", zcache_##_name); \ - } \ - static struct kobj_attribute zcache_##_name##_attr = { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = zcache_##_name##_show, \ - } - -#define ZCACHE_SYSFS_RO_ATOMIC(_name) \ - static ssize_t zcache_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf) \ - { \ - return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \ - } \ - static struct kobj_attribute zcache_##_name##_attr = { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = zcache_##_name##_show, \ - } - -#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \ - static ssize_t zcache_##_name##_show(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf) \ - { \ - return _func(buf); \ - } \ - static struct kobj_attribute zcache_##_name##_attr = { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .show = zcache_##_name##_show, \ - } - -ZCACHE_SYSFS_RO(curr_obj_count_max); -ZCACHE_SYSFS_RO(curr_objnode_count_max); -ZCACHE_SYSFS_RO(flush_total); -ZCACHE_SYSFS_RO(flush_found); -ZCACHE_SYSFS_RO(flobj_total); -ZCACHE_SYSFS_RO(flobj_found); -ZCACHE_SYSFS_RO(failed_eph_puts); -ZCACHE_SYSFS_RO(failed_pers_puts); -ZCACHE_SYSFS_RO(zbud_curr_zbytes); -ZCACHE_SYSFS_RO(zbud_cumul_zpages); -ZCACHE_SYSFS_RO(zbud_cumul_zbytes); -ZCACHE_SYSFS_RO(zbud_buddied_count); -ZCACHE_SYSFS_RO(zbpg_unused_list_count); -ZCACHE_SYSFS_RO(evicted_raw_pages); -ZCACHE_SYSFS_RO(evicted_unbuddied_pages); -ZCACHE_SYSFS_RO(evicted_buddied_pages); -ZCACHE_SYSFS_RO(failed_get_free_pages); -ZCACHE_SYSFS_RO(failed_alloc); -ZCACHE_SYSFS_RO(put_to_flush); -ZCACHE_SYSFS_RO(aborted_preload); -ZCACHE_SYSFS_RO(aborted_shrink); -ZCACHE_SYSFS_RO(compress_poor); -ZCACHE_SYSFS_RO(mean_compress_poor); -ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages); -ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages); -ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count); -ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count); -ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts, - zbud_show_unbuddied_list_counts); -ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts, - zbud_show_cumul_chunk_counts); -ZCACHE_SYSFS_RO_CUSTOM(zv_curr_dist_counts, - zv_curr_dist_counts_show); -ZCACHE_SYSFS_RO_CUSTOM(zv_cumul_dist_counts, - zv_cumul_dist_counts_show); - -static struct attribute *zcache_attrs[] = { - &zcache_curr_obj_count_attr.attr, - &zcache_curr_obj_count_max_attr.attr, - &zcache_curr_objnode_count_attr.attr, - &zcache_curr_objnode_count_max_attr.attr, - &zcache_flush_total_attr.attr, - &zcache_flobj_total_attr.attr, - &zcache_flush_found_attr.attr, - &zcache_flobj_found_attr.attr, - &zcache_failed_eph_puts_attr.attr, - &zcache_failed_pers_puts_attr.attr, - &zcache_compress_poor_attr.attr, - &zcache_mean_compress_poor_attr.attr, - &zcache_zbud_curr_raw_pages_attr.attr, - &zcache_zbud_curr_zpages_attr.attr, - &zcache_zbud_curr_zbytes_attr.attr, - &zcache_zbud_cumul_zpages_attr.attr, - &zcache_zbud_cumul_zbytes_attr.attr, - &zcache_zbud_buddied_count_attr.attr, - &zcache_zbpg_unused_list_count_attr.attr, - &zcache_evicted_raw_pages_attr.attr, - &zcache_evicted_unbuddied_pages_attr.attr, - &zcache_evicted_buddied_pages_attr.attr, - &zcache_failed_get_free_pages_attr.attr, 
- &zcache_failed_alloc_attr.attr, - &zcache_put_to_flush_attr.attr, - &zcache_aborted_preload_attr.attr, - &zcache_aborted_shrink_attr.attr, - &zcache_zbud_unbuddied_list_counts_attr.attr, - &zcache_zbud_cumul_chunk_counts_attr.attr, - &zcache_zv_curr_dist_counts_attr.attr, - &zcache_zv_cumul_dist_counts_attr.attr, - &zcache_zv_max_zsize_attr.attr, - &zcache_zv_max_mean_zsize_attr.attr, - &zcache_zv_page_count_policy_percent_attr.attr, - NULL, -}; - -static struct attribute_group zcache_attr_group = { - .attrs = zcache_attrs, - .name = "zcache", -}; - -#endif /* CONFIG_SYSFS */ -/* - * When zcache is disabled ("frozen"), pools can be created and destroyed, - * but all puts (and thus all other operations that require memory allocation) - * must fail. If zcache is unfrozen, accepts puts, then frozen again, - * data consistency requires all puts while frozen to be converted into - * flushes. - */ -static bool zcache_freeze; - -/* - * zcache shrinker interface (only useful for ephemeral pages, so zbud only) - */ -static int shrink_zcache_memory(struct shrinker *shrink, - struct shrink_control *sc) -{ - int ret = -1; - int nr = sc->nr_to_scan; - gfp_t gfp_mask = sc->gfp_mask; - - if (nr >= 0) { - if (!(gfp_mask & __GFP_FS)) - /* does this case really need to be skipped? */ - goto out; - if (spin_trylock(&zcache_direct_reclaim_lock)) { - zbud_evict_pages(nr); - spin_unlock(&zcache_direct_reclaim_lock); - } else - zcache_aborted_shrink++; - } - ret = (int)atomic_read(&zcache_zbud_curr_raw_pages); -out: - return ret; -} - -static struct shrinker zcache_shrinker = { - .shrink = shrink_zcache_memory, - .seeks = DEFAULT_SEEKS, -}; - -/* - * zcache shims between cleancache/frontswap ops and tmem - */ - -static int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp, - uint32_t index, struct page *page) -{ - struct tmem_pool *pool; - int ret = -1; - - BUG_ON(!irqs_disabled()); - pool = zcache_get_pool_by_id(cli_id, pool_id); - if (unlikely(pool == NULL)) - goto out; - if (!zcache_freeze && zcache_do_preload(pool) == 0) { - /* preload does preempt_disable on success */ - ret = tmem_put(pool, oidp, index, page_address(page), - PAGE_SIZE, 0, is_ephemeral(pool)); - if (ret < 0) { - if (is_ephemeral(pool)) - zcache_failed_eph_puts++; - else - zcache_failed_pers_puts++; - } - zcache_put_pool(pool); - preempt_enable_no_resched(); - } else { - zcache_put_to_flush++; - if (atomic_read(&pool->obj_count) > 0) - /* the put fails whether the flush succeeds or not */ - (void)tmem_flush_page(pool, oidp, index); - zcache_put_pool(pool); - } -out: - return ret; -} - -static int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp, - uint32_t index, struct page *page) -{ - struct tmem_pool *pool; - int ret = -1; - unsigned long flags; - size_t size = PAGE_SIZE; - - local_irq_save(flags); - pool = zcache_get_pool_by_id(cli_id, pool_id); - if (likely(pool != NULL)) { - if (atomic_read(&pool->obj_count) > 0) - ret = tmem_get(pool, oidp, index, page_address(page), - &size, 0, is_ephemeral(pool)); - zcache_put_pool(pool); - } - local_irq_restore(flags); - return ret; -} - -static int zcache_flush_page(int cli_id, int pool_id, - struct tmem_oid *oidp, uint32_t index) -{ - struct tmem_pool *pool; - int ret = -1; - unsigned long flags; - - local_irq_save(flags); - zcache_flush_total++; - pool = zcache_get_pool_by_id(cli_id, pool_id); - if (likely(pool != NULL)) { - if (atomic_read(&pool->obj_count) > 0) - ret = tmem_flush_page(pool, oidp, index); - zcache_put_pool(pool); - } - if (ret >= 0) - 
zcache_flush_found++; - local_irq_restore(flags); - return ret; -} - -static int zcache_flush_object(int cli_id, int pool_id, - struct tmem_oid *oidp) -{ - struct tmem_pool *pool; - int ret = -1; - unsigned long flags; - - local_irq_save(flags); - zcache_flobj_total++; - pool = zcache_get_pool_by_id(cli_id, pool_id); - if (likely(pool != NULL)) { - if (atomic_read(&pool->obj_count) > 0) - ret = tmem_flush_object(pool, oidp); - zcache_put_pool(pool); - } - if (ret >= 0) - zcache_flobj_found++; - local_irq_restore(flags); - return ret; -} - -static int zcache_destroy_pool(int cli_id, int pool_id) -{ - struct tmem_pool *pool = NULL; - struct zcache_client *cli = NULL; - int ret = -1; - - if (pool_id < 0) - goto out; - if (cli_id == LOCAL_CLIENT) - cli = &zcache_host; - else if ((unsigned int)cli_id < MAX_CLIENTS) - cli = &zcache_clients[cli_id]; - if (cli == NULL) - goto out; - atomic_inc(&cli->refcount); - pool = cli->tmem_pools[pool_id]; - if (pool == NULL) - goto out; - cli->tmem_pools[pool_id] = NULL; - /* wait for pool activity on other cpus to quiesce */ - while (atomic_read(&pool->refcount) != 0) - ; - atomic_dec(&cli->refcount); - local_bh_disable(); - ret = tmem_destroy_pool(pool); - local_bh_enable(); - kfree(pool); - pr_info("zcache: destroyed pool id=%d, cli_id=%d\n", - pool_id, cli_id); -out: - return ret; -} - -static int zcache_new_pool(uint16_t cli_id, uint32_t flags) -{ - int poolid = -1; - struct tmem_pool *pool; - struct zcache_client *cli = NULL; - - if (cli_id == LOCAL_CLIENT) - cli = &zcache_host; - else if ((unsigned int)cli_id < MAX_CLIENTS) - cli = &zcache_clients[cli_id]; - if (cli == NULL) - goto out; - atomic_inc(&cli->refcount); - pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL); - if (pool == NULL) { - pr_info("zcache: pool creation failed: out of memory\n"); - goto out; - } - - for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++) - if (cli->tmem_pools[poolid] == NULL) - break; - if (poolid >= MAX_POOLS_PER_CLIENT) { - pr_info("zcache: pool creation failed: max exceeded\n"); - kfree(pool); - poolid = -1; - goto out; - } - atomic_set(&pool->refcount, 0); - pool->client = cli; - pool->pool_id = poolid; - tmem_new_pool(pool, flags); - cli->tmem_pools[poolid] = pool; - pr_info("zcache: created %s tmem pool, id=%d, client=%d\n", - flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral", - poolid, cli_id); -out: - if (cli != NULL) - atomic_dec(&cli->refcount); - return poolid; -} - -/********** - * Two kernel functionalities currently can be layered on top of tmem. - * These are "cleancache" which is used as a second-chance cache for clean - * page cache pages; and "frontswap" which is used for swap pages - * to avoid writes to disk. A generic "shim" is provided here for each - * to translate in-kernel semantics to zcache semantics. 
- */ - -#ifdef CONFIG_CLEANCACHE -static void zcache_cleancache_put_page(int pool_id, - struct cleancache_filekey key, - pgoff_t index, struct page *page) -{ - u32 ind = (u32) index; - struct tmem_oid oid = *(struct tmem_oid *)&key; - - if (likely(ind == index)) - (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index, page); -} - -static int zcache_cleancache_get_page(int pool_id, - struct cleancache_filekey key, - pgoff_t index, struct page *page) -{ - u32 ind = (u32) index; - struct tmem_oid oid = *(struct tmem_oid *)&key; - int ret = -1; - - if (likely(ind == index)) - ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index, page); - return ret; -} - -static void zcache_cleancache_flush_page(int pool_id, - struct cleancache_filekey key, - pgoff_t index) -{ - u32 ind = (u32) index; - struct tmem_oid oid = *(struct tmem_oid *)&key; - - if (likely(ind == index)) - (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind); -} - -static void zcache_cleancache_flush_inode(int pool_id, - struct cleancache_filekey key) -{ - struct tmem_oid oid = *(struct tmem_oid *)&key; - - (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid); -} - -static void zcache_cleancache_flush_fs(int pool_id) -{ - if (pool_id >= 0) - (void)zcache_destroy_pool(LOCAL_CLIENT, pool_id); -} - -static int zcache_cleancache_init_fs(size_t pagesize) -{ - BUG_ON(sizeof(struct cleancache_filekey) != - sizeof(struct tmem_oid)); - BUG_ON(pagesize != PAGE_SIZE); - return zcache_new_pool(LOCAL_CLIENT, 0); -} - -static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize) -{ - /* shared pools are unsupported and map to private */ - BUG_ON(sizeof(struct cleancache_filekey) != - sizeof(struct tmem_oid)); - BUG_ON(pagesize != PAGE_SIZE); - return zcache_new_pool(LOCAL_CLIENT, 0); -} - -static struct cleancache_ops zcache_cleancache_ops = { - .put_page = zcache_cleancache_put_page, - .get_page = zcache_cleancache_get_page, - .flush_page = zcache_cleancache_flush_page, - .flush_inode = zcache_cleancache_flush_inode, - .flush_fs = zcache_cleancache_flush_fs, - .init_shared_fs = zcache_cleancache_init_shared_fs, - .init_fs = zcache_cleancache_init_fs -}; - -struct cleancache_ops zcache_cleancache_register_ops(void) -{ - struct cleancache_ops old_ops = - cleancache_register_ops(&zcache_cleancache_ops); - - return old_ops; -} -#endif - -#ifdef CONFIG_FRONTSWAP -/* a single tmem poolid is used for all frontswap "types" (swapfiles) */ -static int zcache_frontswap_poolid = -1; - -/* - * Swizzling increases objects per swaptype, increasing tmem concurrency - * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS - */ -#define SWIZ_BITS 4 -#define SWIZ_MASK ((1 << SWIZ_BITS) - 1) -#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK)) -#define iswiz(_ind) (_ind >> SWIZ_BITS) - -static inline struct tmem_oid oswiz(unsigned type, u32 ind) -{ - struct tmem_oid oid = { .oid = { 0 } }; - oid.oid[0] = _oswiz(type, ind); - return oid; -} - -static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, - struct page *page) -{ - u64 ind64 = (u64)offset; - u32 ind = (u32)offset; - struct tmem_oid oid = oswiz(type, ind); - int ret = -1; - unsigned long flags; - - BUG_ON(!PageLocked(page)); - if (likely(ind64 == ind)) { - local_irq_save(flags); - ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid, - &oid, iswiz(ind), page); - local_irq_restore(flags); - } - return ret; -} - -/* returns 0 if the page was successfully gotten from frontswap, -1 if - * was not present (should never happen!) 
*/ -static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, - struct page *page) -{ - u64 ind64 = (u64)offset; - u32 ind = (u32)offset; - struct tmem_oid oid = oswiz(type, ind); - int ret = -1; - - BUG_ON(!PageLocked(page)); - if (likely(ind64 == ind)) - ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid, - &oid, iswiz(ind), page); - return ret; -} - -/* flush a single page from frontswap */ -static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset) -{ - u64 ind64 = (u64)offset; - u32 ind = (u32)offset; - struct tmem_oid oid = oswiz(type, ind); - - if (likely(ind64 == ind)) - (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid, - &oid, iswiz(ind)); -} - -/* flush all pages from the passed swaptype */ -static void zcache_frontswap_flush_area(unsigned type) -{ - struct tmem_oid oid; - int ind; - - for (ind = SWIZ_MASK; ind >= 0; ind--) { - oid = oswiz(type, ind); - (void)zcache_flush_object(LOCAL_CLIENT, - zcache_frontswap_poolid, &oid); - } -} - -static void zcache_frontswap_init(unsigned ignored) -{ - /* a single tmem poolid is used for all frontswap "types" (swapfiles) */ - if (zcache_frontswap_poolid < 0) - zcache_frontswap_poolid = - zcache_new_pool(LOCAL_CLIENT, TMEM_POOL_PERSIST); -} - -static struct frontswap_ops zcache_frontswap_ops = { - .put_page = zcache_frontswap_put_page, - .get_page = zcache_frontswap_get_page, - .flush_page = zcache_frontswap_flush_page, - .flush_area = zcache_frontswap_flush_area, - .init = zcache_frontswap_init -}; - -struct frontswap_ops zcache_frontswap_register_ops(void) -{ - struct frontswap_ops old_ops = - frontswap_register_ops(&zcache_frontswap_ops); - - return old_ops; -} -#endif - -/* - * zcache initialization - * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR - * NOTHING HAPPENS! 
- */ - -static int zcache_enabled; - -static int __init enable_zcache(char *s) -{ - zcache_enabled = 1; - return 1; -} -__setup("zcache", enable_zcache); - -/* allow independent dynamic disabling of cleancache and frontswap */ - -static int use_cleancache = 1; - -static int __init no_cleancache(char *s) -{ - use_cleancache = 0; - return 1; -} - -__setup("nocleancache", no_cleancache); - -static int use_frontswap = 1; - -static int __init no_frontswap(char *s) -{ - use_frontswap = 0; - return 1; -} - -__setup("nofrontswap", no_frontswap); - -static int __init zcache_init(void) -{ -#ifdef CONFIG_SYSFS - int ret = 0; - - ret = sysfs_create_group(mm_kobj, &zcache_attr_group); - if (ret) { - pr_err("zcache: can't create sysfs\n"); - goto out; - } -#endif /* CONFIG_SYSFS */ -#if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP) - if (zcache_enabled) { - unsigned int cpu; - - tmem_register_hostops(&zcache_hostops); - tmem_register_pamops(&zcache_pamops); - ret = register_cpu_notifier(&zcache_cpu_notifier_block); - if (ret) { - pr_err("zcache: can't register cpu notifier\n"); - goto out; - } - for_each_online_cpu(cpu) { - void *pcpu = (void *)(long)cpu; - zcache_cpu_notifier(&zcache_cpu_notifier_block, - CPU_UP_PREPARE, pcpu); - } - } - zcache_objnode_cache = kmem_cache_create("zcache_objnode", - sizeof(struct tmem_objnode), 0, 0, NULL); - zcache_obj_cache = kmem_cache_create("zcache_obj", - sizeof(struct tmem_obj), 0, 0, NULL); - ret = zcache_new_client(LOCAL_CLIENT); - if (ret) { - pr_err("zcache: can't create client\n"); - goto out; - } -#endif -#ifdef CONFIG_CLEANCACHE - if (zcache_enabled && use_cleancache) { - struct cleancache_ops old_ops; - - zbud_init(); - register_shrinker(&zcache_shrinker); - old_ops = zcache_cleancache_register_ops(); - pr_info("zcache: cleancache enabled using kernel " - "transcendent memory and compression buddies\n"); - if (old_ops.init_fs != NULL) - pr_warning("zcache: cleancache_ops overridden"); - } -#endif -#ifdef CONFIG_FRONTSWAP - if (zcache_enabled && use_frontswap) { - struct frontswap_ops old_ops; - - old_ops = zcache_frontswap_register_ops(); - pr_info("zcache: frontswap enabled using kernel " - "transcendent memory and xvmalloc\n"); - if (old_ops.init != NULL) - pr_warning("ktmem: frontswap_ops overridden"); - } -#endif -out: - return ret; -} - -module_init(zcache_init) -- cgit v0.10.2 From fd6b68bbac9f100c8728b2cfe4e34f03f3df4e4d Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Tue, 2 Aug 2011 14:20:27 -0300 Subject: staging: zcache: module is GPL This avoids tainting the kernel as if a proprietary module was loaded. The kernel will still be tainted because this is a staging driver. Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 65a81a0..a58a03f 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -53,6 +53,9 @@ #define MAX_CLIENTS 16 #define LOCAL_CLIENT ((uint16_t)-1) + +MODULE_LICENSE("GPL"); + struct zcache_client { struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT]; struct xv_pool *xvpool; -- cgit v0.10.2 From 2f8d92b7779d1f3145cd71552ed8282c8b299772 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 26 Jul 2011 17:15:50 +0100 Subject: gma500: Fix clashes with DRM updates The private object support has migrated from gma500 into the DRM core, remove our now clashing copy so -next can build. 
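[Note: the clash described above is at the symbol level. Once the DRM core itself exports drm_gem_private_object_init(), a second non-static definition of the same function in the gma500 glue code fails the build with a multiple-definition link error. A minimal illustration of that failure mode, using made-up file and function names rather than the real DRM sources:]

/* core.c - the core library now provides the helper */
int helper_init(int x)
{
	return x;
}

/* driver.c - a stale private copy of the same external symbol */
int helper_init(int x)
{
	return x;
}

/*
 * Linking both objects into one image fails roughly like:
 *   ld: driver.o: multiple definition of `helper_init'; core.o: first defined here
 * The fix, as in this patch, is to drop the local copy and rely on the
 * core's definition.
 */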
Signed-off-by: Alan Cox Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c index 779ac1a..daac121 100644 --- a/drivers/staging/gma500/gem_glue.c +++ b/drivers/staging/gma500/gem_glue.c @@ -20,26 +20,6 @@ #include #include -/** - * Initialize an already allocated GEM object of the specified size with - * no GEM provided backing store. Instead the caller is responsible for - * backing the object and handling it. - */ -int drm_gem_private_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size) -{ - BUG_ON((size & (PAGE_SIZE - 1)) != 0); - - obj->dev = dev; - obj->filp = NULL; - - kref_init(&obj->refcount); - atomic_set(&obj->handle_count, 0); - obj->size = size; - - return 0; -} - void drm_gem_object_release_wrap(struct drm_gem_object *obj) { /* Remove the list map if one is present */ @@ -51,8 +31,7 @@ void drm_gem_object_release_wrap(struct drm_gem_object *obj) kfree(list->map); list->map = NULL; } - if (obj->filp) - drm_gem_object_release(obj); + drm_gem_object_release(obj); } /** diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h index a0f2bc4..ce5ce30 100644 --- a/drivers/staging/gma500/gem_glue.h +++ b/drivers/staging/gma500/gem_glue.h @@ -1,4 +1,2 @@ extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); -extern int drm_gem_private_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size); extern int gem_create_mmap_offset(struct drm_gem_object *obj); -- cgit v0.10.2 From 589c3ca00b7886bf743998398884cd4f4d354e17 Mon Sep 17 00:00:00 2001 From: Stefan Lippers-Hollmann Date: Tue, 2 Aug 2011 22:17:25 +0200 Subject: staging: rtl8192u: declare MODULE_FIRMWARE Declaring MODULE_FIRMWARE was apparently forgotten when the embedded firmware arrays were removed in 0a8692b534e18fcec6eac07551bb37a22659f5c7 (rtl8192u_usb: Remove built-in firmware images).
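As a hedged aside on what the declaration buys: MODULE_FIRMWARE() only records the dependency in the module's metadata (for userspace tools such as initramfs generators); the runtime load still goes through request_firmware(). A minimal sketch, with the device pointer as a placeholder:

/* Sketch only; 'dev' stands in for the driver's struct device. */
const struct firmware *fw;
int err;

err = request_firmware(&fw, "RTL8192U/boot.img", dev);
if (err)
	return err;
/* ... copy fw->data (fw->size bytes) to the hardware ... */
release_firmware(fw);

MODULE_FIRMWARE("RTL8192U/boot.img"); /* keeps module metadata in sync */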
Signed-off-by: Stefan Lippers-Hollmann Cc: stable [2.6.39+] Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c index 6766f46..4bb5fff 100644 --- a/drivers/staging/rtl8192u/r819xU_firmware.c +++ b/drivers/staging/rtl8192u/r819xU_firmware.c @@ -399,10 +399,7 @@ download_firmware_fail: } - - - - - - +MODULE_FIRMWARE("RTL8192U/boot.img"); +MODULE_FIRMWARE("RTL8192U/main.img"); +MODULE_FIRMWARE("RTL8192U/data.img"); -- cgit v0.10.2 From c060ae7db4ebf1e5e08199b2aec12f051189041a Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sun, 31 Jul 2011 02:44:05 -0400 Subject: staging: ft1000_proc needs asm/io.h for inw/outw on sparc Seen during an allmodconfig build for sparc: CC [M] drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.o In file included from drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c:26: drivers/staging/ft1000/ft1000-pcmcia/ft1000.h: In function 'ft1000_read_reg': drivers/staging/ft1000/ft1000-pcmcia/ft1000.h:80: error: implicit declaration of function 'inw' drivers/staging/ft1000/ft1000-pcmcia/ft1000.h: In function 'ft1000_write_reg': drivers/staging/ft1000/ft1000-pcmcia/ft1000.h:86: error: implicit declaration of function 'outw' Signed-off-by: Paul Gortmaker Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c index 627a98b..9e728b3 100644 --- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c +++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "ft1000.h" -- cgit v0.10.2 From 8f89615528b11eabda68faaf2438d09c9565a125 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 30 Jul 2011 11:45:09 +0300 Subject: Staging: iio: add some unlocks to raw_read() functions This code was cut and pasted in several places. It's missing some unlocks on error. 
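The bug class being fixed, reduced to a hedged sketch with generic names (the helpers are placeholders, not the drivers' actual symbols): an early return while indio_dev->mlock is held leaks the lock. The patch below unlocks in each error branch; an equivalent shape funnels every exit through a single unlock site:

static int read_raw_locked(struct iio_dev *indio_dev, u16 *val)
{
	int ret;

	mutex_lock(&indio_dev->mlock);
	ret = spi_read_reg_16(indio_dev, val);	/* placeholder helper */
	if (ret)
		goto out_unlock;		/* no locked early return */
	if (*val & ERROR_ACTIVE)		/* placeholder flag */
		ret = check_status(indio_dev);	/* placeholder helper */
out_unlock:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}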
Signed-off-by: Dan Carpenter Acked-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c index bf19888..cf5d15d 100644 --- a/drivers/staging/iio/accel/adis16203_core.c +++ b/drivers/staging/iio/accel/adis16203_core.c @@ -311,13 +311,17 @@ static int adis16203_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); addr = adis16203_addresses[chan->address][0]; ret = adis16203_spi_read_reg_16(indio_dev, addr, &val16); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } if (val16 & ADIS16203_ERROR_ACTIVE) { ret = adis16203_check_status(indio_dev); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c index cfd09b3..3e2b626 100644 --- a/drivers/staging/iio/accel/adis16204_core.c +++ b/drivers/staging/iio/accel/adis16204_core.c @@ -341,13 +341,17 @@ static int adis16204_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); addr = adis16204_addresses[chan->address][0]; ret = adis16204_spi_read_reg_16(indio_dev, addr, &val16); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } if (val16 & ADIS16204_ERROR_ACTIVE) { ret = adis16204_check_status(indio_dev); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c index 55f3a7b..bec1fa8 100644 --- a/drivers/staging/iio/accel/adis16209_core.c +++ b/drivers/staging/iio/accel/adis16209_core.c @@ -337,13 +337,17 @@ static int adis16209_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); addr = adis16209_addresses[chan->address][0]; ret = adis16209_spi_read_reg_16(indio_dev, addr, &val16); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } if (val16 & ADIS16209_ERROR_ACTIVE) { ret = adis16209_check_status(indio_dev); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c index 4a4eafc..aee8b69 100644 --- a/drivers/staging/iio/accel/adis16240_core.c +++ b/drivers/staging/iio/accel/adis16240_core.c @@ -370,13 +370,17 @@ static int adis16240_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); addr = adis16240_addresses[chan->address][0]; ret = adis16240_spi_read_reg_16(indio_dev, addr, &val16); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } if (val16 & ADIS16240_ERROR_ACTIVE) { ret = adis16240_check_status(indio_dev); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c index 05797f4..f2d43cf 100644 --- a/drivers/staging/iio/gyro/adis16260_core.c +++ b/drivers/staging/iio/gyro/adis16260_core.c @@ -446,13 +446,17 @@ static int adis16260_read_raw(struct iio_dev *indio_dev, mutex_lock(&indio_dev->mlock); addr = adis16260_addresses[chan->address][0]; ret = 
adis16260_spi_read_reg_16(indio_dev, addr, &val16); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } if (val16 & ADIS16260_ERROR_ACTIVE) { ret = adis16260_check_status(indio_dev); - if (ret) + if (ret) { + mutex_unlock(&indio_dev->mlock); return ret; + } } val16 = val16 & ((1 << chan->scan_type.realbits) - 1); if (chan->scan_type.sign == 's') -- cgit v0.10.2 From 09f9390d797ff34020faab866996884fd93a0081 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 Jul 2011 13:59:35 -0700 Subject: drivers/staging/speakup/devsynth.c: fix "buffer size is not provably correct" error x86_64 allmodconfig: In file included from arch/x86/include/asm/uaccess.h:572, from include/linux/uaccess.h:5, from drivers/staging/speakup/devsynth.c:4: In function 'copy_from_user', inlined from 'speakup_file_write' at drivers/staging/speakup/devsynth.c:28: arch/x86/include/asm/uaccess_64.h:64: error: call to 'copy_from_user_overflow' declared with attribute error: copy_from_user() buffer size is not provably correct I'm not sure what was unprovable about it, but size_t is the correct type anyway. Also replace needless min_t() with min() Cc: William Hubbs Cc: Arjan van de Ven Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/speakup/devsynth.c b/drivers/staging/speakup/devsynth.c index 39dc586..940769e 100644 --- a/drivers/staging/speakup/devsynth.c +++ b/drivers/staging/speakup/devsynth.c @@ -18,13 +18,14 @@ static ssize_t speakup_file_write(struct file *fp, const char *buffer, { size_t count = nbytes; const char *ptr = buffer; - int bytes; + size_t bytes; unsigned long flags; u_char buf[256]; + if (synth == NULL) return -ENODEV; while (count > 0) { - bytes = min_t(size_t, count, sizeof(buf)); + bytes = min(count, sizeof(buf)); if (copy_from_user(buf, ptr, bytes)) return -EFAULT; count -= bytes; -- cgit v0.10.2 From dac95cb8cf40db85fa031b1f170e167cbaa9bf3a Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 Jul 2011 13:59:36 -0700 Subject: drivers/staging/dt3155v4l/dt3155v4l.c needs slab.h alpha allmodconfig: drivers/staging/dt3155v4l/dt3155v4l.c:434: error: implicit declaration of function 'kzalloc' Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/dt3155v4l/dt3155v4l.c b/drivers/staging/dt3155v4l/dt3155v4l.c index fe02d22..05aa41c 100644 --- a/drivers/staging/dt3155v4l/dt3155v4l.c +++ b/drivers/staging/dt3155v4l/dt3155v4l.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include -- cgit v0.10.2 From dd010235fd714571df587d0f14c70eebc6823973 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 Jul 2011 13:59:37 -0700 Subject: drivers/staging/solo6x10/core.c needs slab.h alpha allmodconfig: drivers/staging/solo6x10/core.c:140: error: implicit declaration of function 'kzalloc' Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/solo6x10/core.c b/drivers/staging/solo6x10/core.c index 7677994..f974f64 100644 --- a/drivers/staging/solo6x10/core.c +++ b/drivers/staging/solo6x10/core.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include "solo6x10.h" #include "tw28.h" -- cgit v0.10.2 From 7b4785f45db2c124a6cbd73114e7e078a9251e0a Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 Jul 2011 13:59:37 -0700 Subject: drivers/staging/solo6x10/p2m.c needs slab.h alpha allmodconfig: drivers/staging/solo6x10/p2m.c:52: error: implicit declaration of function 'kzalloc' Signed-off-by: Andrew Morton Signed-off-by: Greg 
Kroah-Hartman diff --git a/drivers/staging/solo6x10/p2m.c b/drivers/staging/solo6x10/p2m.c index 5717eab..56210f0 100644 --- a/drivers/staging/solo6x10/p2m.c +++ b/drivers/staging/solo6x10/p2m.c @@ -18,6 +18,7 @@ */ #include +#include #include #include "solo6x10.h" -- cgit v0.10.2 From e72be3fc567f587b1f70134e7f6538a4f5ae8f95 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 28 Jul 2011 13:59:38 -0700 Subject: staging: more missing slab.h inclusions Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/solo6x10/enc.c b/drivers/staging/solo6x10/enc.c index 285f7f3..de50259 100644 --- a/drivers/staging/solo6x10/enc.c +++ b/drivers/staging/solo6x10/enc.c @@ -18,6 +18,7 @@ */ #include +#include #include "solo6x10.h" #include "osd-font.h" diff --git a/drivers/staging/solo6x10/g723.c b/drivers/staging/solo6x10/g723.c index bd8eb92..59274bf 100644 --- a/drivers/staging/solo6x10/g723.c +++ b/drivers/staging/solo6x10/g723.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include -- cgit v0.10.2 From 193634ce7740e18a012c543fad0843c6af0440d9 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 28 Jul 2011 14:46:04 +0200 Subject: drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c: adjust array index Convert array index from the loop bound to the loop index. A simplified version of the semantic patch that fixes this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression e1,e2,ar; @@ for(e1 = 0; e1 < e2; e1++) { <... ar[ - e2 + e1 ] ...> } // Signed-off-by: Julia Lawall Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c index c01c0cb..b99a11a 100644 --- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c +++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsparser.c @@ -812,7 +812,7 @@ int AthCreateCommandList(struct ps_cmd_packet **HciPacketList, u32 *numPackets) for(count = 0; count < Patch_Count; count++) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Freeing Patch Buffer %d \r\n",count)); - kfree(RamPatch[Patch_Count].Data); + kfree(RamPatch[count].Data); } for(count = 0; count < Tag_Count; count++) { -- cgit v0.10.2 From 55dc6ee7def173f0dd3b6d0d0257917112d542e9 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 1 Aug 2011 11:45:49 +0100 Subject: Staging: Add clk API note to nvec/TODO Add a note about the abuse of the clk API to the nvec/TODO list. Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO index 77b47f7..649d6b7 100644 --- a/drivers/staging/nvec/TODO +++ b/drivers/staging/nvec/TODO @@ -4,5 +4,7 @@ ToDo list (incomplete, unordered) - add compile as module support - move nvec devices to mfd cells? - adjust to kernel style - - + - fix clk usage + should not be using clk_get_sys(), but clk_get(&pdev->dev, conn) + where conn is either NULL if the device only has one clock, or + the device specific name if it has multiple clocks. -- cgit v0.10.2 From fe3e593601752d547bd00d83f0bdedbce1d80f59 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 9 Jul 2011 21:23:26 +0200 Subject: drivers/staging/hv/blkvsc_drv.c: eliminate NULL pointer dereference In this code, blkvsc_req is allocated in the cache blkdev->request_pool, but freed in the first case to the cache blkvsc_req->dev->request_pool. blkvsc_req->dev is subsequently initialized to blkdev, making these the same at the second call to kmem_cache_free. 
But at the point of the first call, blkvsc_req->dev is NULL. The second call is changed too, for uniformity. The semantic patch that fixes this problem is as follows: (http://coccinelle.lip6.fr/) // @@ expression x,e,e1,e2,e3; @@ x = \(kmem_cache_alloc\|kmem_cache_zalloc\)(e1,e2) ... when != x = e ( kmem_cache_free(e1,x); | ?-kmem_cache_free(e3,x); +kmem_cache_free(e1,x); ) // Signed-off-by: Julia Lawall Cc: KY Srinivasan Cc: Hank Janssen Cc: Haiyang Zhang Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 3612574..d286b22 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c @@ -325,7 +325,7 @@ static int blkvsc_do_operation(struct block_device_context *blkdev, page_buf = alloc_page(GFP_KERNEL); if (!page_buf) { - kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); + kmem_cache_free(blkdev->request_pool, blkvsc_req); return -ENOMEM; } @@ -422,7 +422,7 @@ cleanup: __free_page(page_buf); - kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); + kmem_cache_free(blkdev->request_pool, blkvsc_req); return ret; } -- cgit v0.10.2 From 151798f872d6b386d82cd1707ad703e981fef8f2 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 2 Aug 2011 19:42:19 +0200 Subject: ASoC: sgtl5000: fix cache handling Cache handling in this driver is broken. The chip has 16-bit registers, yet the register numbers also increase by 2 per register, i.e. there are only even-numbered registers. The cache in this driver, though, simply increments register numbers, so it does need some mapping as seen in sgtl5000_restore_regs(), note the '>> 1': snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL, cache[SGTL5000_CHIP_LINREG_CTRL >> 1]); That, of course, won't work with snd_soc_update_bits(). (Thus, we won't even notice the missing register 0x1c in the default regs which shifted all following registers to wrong values.) Noticed on the MX28EVK where enabling the regulators simply locked up the chip. Refactor the routines and use a properly sized default_regs array which matches the register layout of the underlying chip, i.e. create a truly flat cache. This also saves some code which should make up for the bigger array a little. When soc-core someday gains another cache type that handles a step size, this conversion will also ease the transition. Signed-off-by: Wolfram Sang Tested-by: Dong Aisheng Tested-by: Shawn Guo Acked-by: Liam Girdwood Signed-off-by: Mark Brown Cc: stable@kernel.org diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 76258f2..7e4066e 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -33,73 +33,31 @@ #define SGTL5000_DAP_REG_OFFSET 0x0100 #define SGTL5000_MAX_REG_OFFSET 0x013A -/* default value of sgtl5000 registers except DAP */ -static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET >> 1] = { - 0xa011, /* 0x0000, CHIP_ID. 11 stand for revison 17 */ - 0x0000, /* 0x0002, CHIP_DIG_POWER.
*/ - 0x0008, /* 0x0004, CHIP_CKL_CTRL */ - 0x0010, /* 0x0006, CHIP_I2S_CTRL */ - 0x0000, /* 0x0008, reserved */ - 0x0008, /* 0x000A, CHIP_SSS_CTRL */ - 0x0000, /* 0x000C, reserved */ - 0x020c, /* 0x000E, CHIP_ADCDAC_CTRL */ - 0x3c3c, /* 0x0010, CHIP_DAC_VOL */ - 0x0000, /* 0x0012, reserved */ - 0x015f, /* 0x0014, CHIP_PAD_STRENGTH */ - 0x0000, /* 0x0016, reserved */ - 0x0000, /* 0x0018, reserved */ - 0x0000, /* 0x001A, reserved */ - 0x0000, /* 0x001E, reserved */ - 0x0000, /* 0x0020, CHIP_ANA_ADC_CTRL */ - 0x1818, /* 0x0022, CHIP_ANA_HP_CTRL */ - 0x0111, /* 0x0024, CHIP_ANN_CTRL */ - 0x0000, /* 0x0026, CHIP_LINREG_CTRL */ - 0x0000, /* 0x0028, CHIP_REF_CTRL */ - 0x0000, /* 0x002A, CHIP_MIC_CTRL */ - 0x0000, /* 0x002C, CHIP_LINE_OUT_CTRL */ - 0x0404, /* 0x002E, CHIP_LINE_OUT_VOL */ - 0x7060, /* 0x0030, CHIP_ANA_POWER */ - 0x5000, /* 0x0032, CHIP_PLL_CTRL */ - 0x0000, /* 0x0034, CHIP_CLK_TOP_CTRL */ - 0x0000, /* 0x0036, CHIP_ANA_STATUS */ - 0x0000, /* 0x0038, reserved */ - 0x0000, /* 0x003A, CHIP_ANA_TEST2 */ - 0x0000, /* 0x003C, CHIP_SHORT_CTRL */ - 0x0000, /* reserved */ -}; - -/* default value of dap registers */ -static const u16 sgtl5000_dap_regs[] = { - 0x0000, /* 0x0100, DAP_CONTROL */ - 0x0000, /* 0x0102, DAP_PEQ */ - 0x0040, /* 0x0104, DAP_BASS_ENHANCE */ - 0x051f, /* 0x0106, DAP_BASS_ENHANCE_CTRL */ - 0x0000, /* 0x0108, DAP_AUDIO_EQ */ - 0x0040, /* 0x010A, DAP_SGTL_SURROUND */ - 0x0000, /* 0x010C, DAP_FILTER_COEF_ACCESS */ - 0x0000, /* 0x010E, DAP_COEF_WR_B0_MSB */ - 0x0000, /* 0x0110, DAP_COEF_WR_B0_LSB */ - 0x0000, /* 0x0112, reserved */ - 0x0000, /* 0x0114, reserved */ - 0x002f, /* 0x0116, DAP_AUDIO_EQ_BASS_BAND0 */ - 0x002f, /* 0x0118, DAP_AUDIO_EQ_BAND0 */ - 0x002f, /* 0x011A, DAP_AUDIO_EQ_BAND2 */ - 0x002f, /* 0x011C, DAP_AUDIO_EQ_BAND3 */ - 0x002f, /* 0x011E, DAP_AUDIO_EQ_TREBLE_BAND4 */ - 0x8000, /* 0x0120, DAP_MAIN_CHAN */ - 0x0000, /* 0x0122, DAP_MIX_CHAN */ - 0x0510, /* 0x0124, DAP_AVC_CTRL */ - 0x1473, /* 0x0126, DAP_AVC_THRESHOLD */ - 0x0028, /* 0x0128, DAP_AVC_ATTACK */ - 0x0050, /* 0x012A, DAP_AVC_DECAY */ - 0x0000, /* 0x012C, DAP_COEF_WR_B1_MSB */ - 0x0000, /* 0x012E, DAP_COEF_WR_B1_LSB */ - 0x0000, /* 0x0130, DAP_COEF_WR_B2_MSB */ - 0x0000, /* 0x0132, DAP_COEF_WR_B2_LSB */ - 0x0000, /* 0x0134, DAP_COEF_WR_A1_MSB */ - 0x0000, /* 0x0136, DAP_COEF_WR_A1_LSB */ - 0x0000, /* 0x0138, DAP_COEF_WR_A2_MSB */ - 0x0000, /* 0x013A, DAP_COEF_WR_A2_LSB */ +/* default value of sgtl5000 registers */ +static const u16 sgtl5000_regs[SGTL5000_MAX_REG_OFFSET] = { + [SGTL5000_CHIP_CLK_CTRL] = 0x0008, + [SGTL5000_CHIP_I2S_CTRL] = 0x0010, + [SGTL5000_CHIP_SSS_CTRL] = 0x0008, + [SGTL5000_CHIP_DAC_VOL] = 0x3c3c, + [SGTL5000_CHIP_PAD_STRENGTH] = 0x015f, + [SGTL5000_CHIP_ANA_HP_CTRL] = 0x1818, + [SGTL5000_CHIP_ANA_CTRL] = 0x0111, + [SGTL5000_CHIP_LINE_OUT_VOL] = 0x0404, + [SGTL5000_CHIP_ANA_POWER] = 0x7060, + [SGTL5000_CHIP_PLL_CTRL] = 0x5000, + [SGTL5000_DAP_BASS_ENHANCE] = 0x0040, + [SGTL5000_DAP_BASS_ENHANCE_CTRL] = 0x051f, + [SGTL5000_DAP_SURROUND] = 0x0040, + [SGTL5000_DAP_EQ_BASS_BAND0] = 0x002f, + [SGTL5000_DAP_EQ_BASS_BAND1] = 0x002f, + [SGTL5000_DAP_EQ_BASS_BAND2] = 0x002f, + [SGTL5000_DAP_EQ_BASS_BAND3] = 0x002f, + [SGTL5000_DAP_EQ_BASS_BAND4] = 0x002f, + [SGTL5000_DAP_MAIN_CHAN] = 0x8000, + [SGTL5000_DAP_AVC_CTRL] = 0x0510, + [SGTL5000_DAP_AVC_THRESHOLD] = 0x1473, + [SGTL5000_DAP_AVC_ATTACK] = 0x0028, + [SGTL5000_DAP_AVC_DECAY] = 0x0050, }; /* regulator supplies for sgtl5000, VDDD is an optional external supply */ @@ -1023,12 +981,10 @@ static int sgtl5000_suspend(struct 
snd_soc_codec *codec, pm_message_t state) static int sgtl5000_restore_regs(struct snd_soc_codec *codec) { u16 *cache = codec->reg_cache; - int i; - int regular_regs = SGTL5000_CHIP_SHORT_CTRL >> 1; + u16 reg; /* restore regular registers */ - for (i = 0; i < regular_regs; i++) { - int reg = i << 1; + for (reg = 0; reg <= SGTL5000_CHIP_SHORT_CTRL; reg += 2) { /* this regs depends on the others */ if (reg == SGTL5000_CHIP_ANA_POWER || @@ -1038,35 +994,31 @@ static int sgtl5000_restore_regs(struct snd_soc_codec *codec) reg == SGTL5000_CHIP_CLK_CTRL) continue; - snd_soc_write(codec, reg, cache[i]); + snd_soc_write(codec, reg, cache[reg]); } /* restore dap registers */ - for (i = SGTL5000_DAP_REG_OFFSET >> 1; - i < SGTL5000_MAX_REG_OFFSET >> 1; i++) { - int reg = i << 1; - - snd_soc_write(codec, reg, cache[i]); - } + for (reg = SGTL5000_DAP_REG_OFFSET; reg < SGTL5000_MAX_REG_OFFSET; reg += 2) + snd_soc_write(codec, reg, cache[reg]); /* * restore power and other regs according * to set_power() and set_clock() */ snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL, - cache[SGTL5000_CHIP_LINREG_CTRL >> 1]); + cache[SGTL5000_CHIP_LINREG_CTRL]); snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER, - cache[SGTL5000_CHIP_ANA_POWER >> 1]); + cache[SGTL5000_CHIP_ANA_POWER]); snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, - cache[SGTL5000_CHIP_CLK_CTRL >> 1]); + cache[SGTL5000_CHIP_CLK_CTRL]); snd_soc_write(codec, SGTL5000_CHIP_REF_CTRL, - cache[SGTL5000_CHIP_REF_CTRL >> 1]); + cache[SGTL5000_CHIP_REF_CTRL]); snd_soc_write(codec, SGTL5000_CHIP_LINE_OUT_CTRL, - cache[SGTL5000_CHIP_LINE_OUT_CTRL >> 1]); + cache[SGTL5000_CHIP_LINE_OUT_CTRL]); return 0; } @@ -1454,16 +1406,6 @@ static __devinit int sgtl5000_i2c_probe(struct i2c_client *client, if (!sgtl5000) return -ENOMEM; - /* - * copy DAP default values to default value array. - * sgtl5000 register space has a big hole, merge it - * at init phase makes life easy. - * FIXME: should we drop 'const' of sgtl5000_regs? - */ - memcpy((void *)(&sgtl5000_regs[0] + (SGTL5000_DAP_REG_OFFSET >> 1)), - sgtl5000_dap_regs, - SGTL5000_MAX_REG_OFFSET - SGTL5000_DAP_REG_OFFSET); - i2c_set_clientdata(client, sgtl5000); ret = snd_soc_register_codec(&client->dev, -- cgit v0.10.2 From f41c53a569c4cf0556893ec9cfcf697d069799e1 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Wed, 3 Aug 2011 15:02:55 +0200 Subject: block: swim3: fix unterminated of_device_id table of_device_id structures need a NULL terminating entry, add it. Signed-off-by: Axel Lin Signed-off-by: Jens Axboe diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 773bfa7..ae3e167 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1184,6 +1184,7 @@ static struct of_device_id swim3_match[] = { .compatible = "swim3" }, + { /* end of list */ } }; static struct macio_driver swim3_driver = -- cgit v0.10.2 From 12623f07b9d01bbaf3035284ea6a110787cc1e66 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Wed, 3 Aug 2011 11:00:40 -0300 Subject: staging: zcache: include module.h for MODULE_LICENSE The oncoming cleanup of module.h usage requires the explicit inclusion of module.h when it was otherwise being included indirectly. Otherwise, building zcache will fail. 
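The common rule behind this fix and the neighboring slab.h patches, as a short hedged sketch (not a file from the tree): each translation unit should directly include the headers for the symbols it uses instead of relying on headers pulled in transitively, which the ongoing header cleanups remove:

/* Sketch only. */
#include <linux/module.h>	/* MODULE_LICENSE(), module_init() */
#include <linux/slab.h>		/* kzalloc(), kfree() */

MODULE_LICENSE("GPL");

static int __init demo_init(void)
{
	void *p = kzalloc(16, GFP_KERNEL);

	kfree(p);
	return 0;
}
module_init(demo_init);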
Reported-by: Stephen Rothwell Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index a58a03f..66469ac 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -19,6 +19,7 @@ * http://marc.info/?l=linux-mm&m=127811271605009 */ +#include #include #include #include -- cgit v0.10.2 From f8d73aa362cec89e3379bdcdae54cc46e0a6b34d Mon Sep 17 00:00:00 2001 From: Wanlong Gao Date: Wed, 3 Aug 2011 17:17:36 +0800 Subject: drivers:staging:solo6x10:add the missed slab.h Add the missed linux/slab.h to solo6x10.h. Signed-off-by: Wanlong Gao Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/solo6x10/solo6x10.h b/drivers/staging/solo6x10/solo6x10.h index 17c06bd..abee721 100644 --- a/drivers/staging/solo6x10/solo6x10.h +++ b/drivers/staging/solo6x10/solo6x10.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include -- cgit v0.10.2 From 00894ce9b85887caa0c16e18757004b9cc9f64cf Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 3 Aug 2011 12:33:24 -0300 Subject: perf report: Use ui__warning in some more places So that we get a proper warning in the TUI in cases like: $ perf report --stdio -g fractal,0.5,caller --sort pid Selected -g but no callchain data. Did you call 'perf record' without -g? $ The --stdio case is ok because it uses fprintf, ui__warning is needed to figure out if --stdio or --tui is being used. Cc: Arun Sharma Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Sam Liao Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-ag9fz2wd17mbbfjsbznq1wms@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index f854efd..d7ff277 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -162,23 +162,22 @@ static int perf_session__setup_sample_type(struct perf_session *self) { if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { - fprintf(stderr, "selected --sort parent, but no" - " callchain data. Did you call" - " perf record without -g?\n"); + ui__warning("Selected --sort parent, but no " + "callchain data. Did you call " + "'perf record' without -g?\n"); return -EINVAL; } if (symbol_conf.use_callchain) { - fprintf(stderr, "selected -g but no callchain data." - " Did you call perf record without" - " -g?\n"); + ui__warning("Selected -g but no callchain data. Did " + "you call 'perf record' without -g?\n"); return -1; } } else if (!dont_use_callchains && callchain_param.mode != CHAIN_NONE && !symbol_conf.use_callchain) { symbol_conf.use_callchain = true; if (callchain_register_param(&callchain_param) < 0) { - fprintf(stderr, "Can't register callchain" - " params\n"); + ui__warning("Can't register callchain " + "params.\n"); return -EINVAL; } } -- cgit v0.10.2 From 20feaab0323cc062b298c12e77869424df05f31f Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 4 Aug 2011 00:01:00 +0100 Subject: ARM: Fix build error for SMP=n builds Unfortunately, the module fixups cause the kernel to fail to build when SMP is not enabled. Fix this by removing the reference to fixup_smp on non-SMP fixup kernels, but ensuring that if we do have the SMP fixup section, we refuse to load the module. 
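A hedged sketch of an alternative arrangement, not the applied patch: provide a stub and a feature flag so the caller stays free of the in-function #ifdef. The fixup_smp() prototype here is assumed from the call site in the diff below:

#ifdef CONFIG_SMP_ON_UP
extern void fixup_smp(const void *start, unsigned long size);
#define HAVE_SMP_FIXUPS 1
#else
static inline void fixup_smp(const void *start, unsigned long size) { }
#define HAVE_SMP_FIXUPS 0
#endif

	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
	if (s && !is_smp()) {
		if (!HAVE_SMP_FIXUPS)
			return -EINVAL;	/* refuse the module, as the patch does */
		fixup_smp((void *)s->sh_addr, s->sh_size);
	}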
Signed-off-by: Russell King diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 05b3776..cc2020c 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -323,7 +323,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, #endif s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); if (s && !is_smp()) +#ifdef CONFIG_SMP_ON_UP fixup_smp((void *)s->sh_addr, s->sh_size); +#else + return -EINVAL; +#endif return 0; } -- cgit v0.10.2 From adabb3ec8b0bcbd2ca81973d33c3da726b939c7c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 3 Aug 2011 07:48:37 +0200 Subject: ALSA: hda - Fix digital-mic mono recording on ASUS Eee PC The digital-mic unit on ASUS Eee PC gives PDM signals instead of the normal stereo PCM, thus you can't record a mono stream from the stereo stream as is; the summed stereo signal results in almost zero level, and you'll hear only soft noise. As a workaround, use ALC269-specific COEF to manipulate the dmic route for mono, like used for ALC271x. This is implemented as a fix-up, thus it works only with model=auto or without REALTEK_QUIRKS Kconfig. Reported-and-tested-by: Pavel Roskin Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e125c60..9a1aa09 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4484,6 +4484,22 @@ static void alc269_fixup_pcm_44k(struct hda_codec *codec, spec->stream_analog_capture = &alc269_44k_pcm_analog_capture; } +static void alc269_fixup_stereo_dmic(struct hda_codec *codec, + const struct alc_fixup *fix, int action) +{ + int coef; + + if (action != ALC_FIXUP_ACT_INIT) + return; + /* The digital-mic unit sends PDM (differential signal) instead of + * the standard PCM, thus you can't record a valid mono stream as is. + * Below is a workaround specific to ALC269 to control the dmic + * signal source as mono. 
+ */ + coef = alc_read_coef_idx(codec, 0x07); + alc_write_coef_idx(codec, 0x07, coef | 0x80); +} + enum { ALC269_FIXUP_SONY_VAIO, ALC275_FIXUP_SONY_VAIO_GPIO2, @@ -4494,6 +4510,7 @@ enum { ALC275_FIXUP_SONY_HWEQ, ALC271_FIXUP_DMIC, ALC269_FIXUP_PCM_44K, + ALC269_FIXUP_STEREO_DMIC, }; static const struct alc_fixup alc269_fixups[] = { @@ -4556,10 +4573,19 @@ static const struct alc_fixup alc269_fixups[] = { .type = ALC_FIXUP_FUNC, .v.func = alc269_fixup_pcm_44k, }, + [ALC269_FIXUP_STEREO_DMIC] = { + .type = ALC_FIXUP_FUNC, + .v.func = alc269_fixup_stereo_dmic, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2), SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), -- cgit v0.10.2 From c3540b81ee707bc8a7a83e850adf5feb3b84c04d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 4 Aug 2011 15:19:26 +0200 Subject: ALSA: hda - Use auto-parser for ASUS UX50, Eee PC P901, S101 and P1005 These models work fine with the auto-parser, and now that the digital-mic workaround is implemented as an auto-parser fixup, we can drop their static model quirks. Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/alc269_quirks.c b/sound/pci/hda/alc269_quirks.c index 14fdcf2..5ac0e21 100644 --- a/sound/pci/hda/alc269_quirks.c +++ b/sound/pci/hda/alc269_quirks.c @@ -531,17 +531,10 @@ static const struct snd_pci_quirk alc269_cfg_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1653, "ASUS U50", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x1693, "ASUS F50N", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS F5Q", ALC269_AMIC), - SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_DMIC), SND_PCI_QUIRK(0x1043, 0x1723, "ASUS P80", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x1743, "ASUS U80", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x1773, "ASUS U20A", ALC269_AMIC), SND_PCI_QUIRK(0x1043, 0x1883, "ASUS F81Se", ALC269_AMIC), - SND_PCI_QUIRK(0x1043, 0x831a, "ASUS Eeepc P901", - ALC269_DMIC), - SND_PCI_QUIRK(0x1043, 0x834a, "ASUS Eeepc S101", - ALC269_DMIC), - SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005HA", ALC269_DMIC), - SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005HA", ALC269_DMIC), SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_AUTO), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook ICH9M-based", ALC269_LIFEBOOK), SND_PCI_QUIRK(0x152d, 0x1778, "Quanta ON1", ALC269_DMIC), -- cgit v0.10.2 From 2921623f71c0a7f8ab979a8903cccd7a374436e7 Mon Sep 17 00:00:00 2001 From: Deepak Saxena Date: Wed, 3 Aug 2011 17:04:01 -0700 Subject: sound: oss/pas2: Remove CLOCK_TICK_RATE dependency from PAS16 driver Update the PAS16 driver to use PIT_TICK_RATE instead of the more generic CLOCK_TICK_RATE, as the two are equivalent on X86 and we want to deprecate the latter.
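The conversion below keeps the driver's open-coded round-to-nearest division. As a hedged aside, the same arithmetic is what DIV_ROUND_CLOSEST() from linux/kernel.h expands to for positive values, so an equivalent sketch would be:

/* Sketch: pick the divisor nearest to rate/arg, then report the rate
 * the hardware will actually produce, as pcm_set_speed() does. */
unsigned int div  = DIV_ROUND_CLOSEST(PIT_TICK_RATE, arg);
unsigned int rate = DIV_ROUND_CLOSEST(PIT_TICK_RATE, div);
pcm_speed = rate;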
Signed-off-by: Deepak Saxena Signed-off-by: Takashi Iwai diff --git a/sound/oss/pas2_pcm.c b/sound/oss/pas2_pcm.c index 8f7d175..6f13ab4 100644 --- a/sound/oss/pas2_pcm.c +++ b/sound/oss/pas2_pcm.c @@ -63,13 +63,13 @@ static int pcm_set_speed(int arg) if (pcm_channels & 2) { - foo = ((CLOCK_TICK_RATE / 2) + (arg / 2)) / arg; - arg = ((CLOCK_TICK_RATE / 2) + (foo / 2)) / foo; + foo = ((PIT_TICK_RATE / 2) + (arg / 2)) / arg; + arg = ((PIT_TICK_RATE / 2) + (foo / 2)) / foo; } else { - foo = (CLOCK_TICK_RATE + (arg / 2)) / arg; - arg = (CLOCK_TICK_RATE + (foo / 2)) / foo; + foo = (PIT_TICK_RATE + (arg / 2)) / arg; + arg = (PIT_TICK_RATE + (foo / 2)) / foo; } pcm_speed = arg; -- cgit v0.10.2 From 824818b148db42173446707df4cbd61cd7133272 Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Thu, 4 Aug 2011 16:17:42 +0200 Subject: ALSA: snd-usb: Accept UAC2 FORMAT_TYPE descriptors with bLength > 6 The Focusrite Scarlett 18i6 USB has them that way, which is probably a bug. Anyway, the driver should simply ignore this fact. Signed-off-by: Daniel Mack Reported-by: Nicolai Krakowiak Cc: stable@kernel.org Signed-off-by: Clemens Ladisch Signed-off-by: Takashi Iwai diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 7c0d21e..7d46e48 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -352,7 +352,7 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no) continue; } if (((protocol == UAC_VERSION_1) && (fmt->bLength < 8)) || - ((protocol == UAC_VERSION_2) && (fmt->bLength != 6))) { + ((protocol == UAC_VERSION_2) && (fmt->bLength < 6))) { snd_printk(KERN_ERR "%d:%u:%d : invalid UAC_FORMAT_TYPE desc\n", dev->devnum, iface_no, altno); continue; -- cgit v0.10.2 From 60c961a9e1ed879a4d151df6076bf1203f595f73 Mon Sep 17 00:00:00 2001 From: Nicolai Krakowiak Date: Thu, 4 Aug 2011 15:56:27 +0200 Subject: ALSA: snd-usb: avoid dividing by zero on invalid input Signed-off-by: Nicolai Krakowiak Acked-by: Daniel Mack Acked-by: Clemens Ladisch Cc: stable@kernel.org Signed-off-by: Takashi Iwai diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c22fa76..ee9aa08 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1191,6 +1191,11 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void if (state->mixer->protocol == UAC_VERSION_1) { csize = hdr->bControlSize; + if (!csize) { + snd_printdd(KERN_ERR "usbaudio: unit %u: " + "invalid bControlSize == 0\n", unitid); + return -EINVAL; + } channels = (hdr->bLength - 7) / csize - 1; bmaControls = hdr->bmaControls; } else { -- cgit v0.10.2 From 1faa5d07a93fc5b0a4a5254fc940a79e20b55540 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 4 Aug 2011 15:56:28 +0200 Subject: ALSA: snd-usb: operate on given mixer interface only When creating the mixers for a USB audio device, the current code looks at the host interface stored in mixer->chip->ctrl_if. Change this to instead keep a local pointer to the interface that was given when snd_usb_create_mixer() was called.
Signed-off-by: Daniel Mack Reported-by: Nicolai Krakowiak Reported-by: Lean-Yves LENHOF Acked-by: Clemens Ladisch Cc: stable@kernel.org Signed-off-by: Takashi Iwai diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index ee9aa08..c04d7c7 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1939,15 +1939,13 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) struct mixer_build state; int err; const struct usbmix_ctl_map *map; - struct usb_host_interface *hostif; void *p; - hostif = mixer->chip->ctrl_intf; memset(&state, 0, sizeof(state)); state.chip = mixer->chip; state.mixer = mixer; - state.buffer = hostif->extra; - state.buflen = hostif->extralen; + state.buffer = mixer->hostif->extra; + state.buflen = mixer->hostif->extralen; /* check the mapping table */ for (map = usbmix_ctl_maps; map->id; map++) { @@ -1960,7 +1958,8 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) } p = NULL; - while ((p = snd_usb_find_csint_desc(hostif->extra, hostif->extralen, p, UAC_OUTPUT_TERMINAL)) != NULL) { + while ((p = snd_usb_find_csint_desc(mixer->hostif->extra, mixer->hostif->extralen, + p, UAC_OUTPUT_TERMINAL)) != NULL) { if (mixer->protocol == UAC_VERSION_1) { struct uac1_output_terminal_descriptor *desc = p; @@ -2167,17 +2166,15 @@ int snd_usb_mixer_activate(struct usb_mixer_interface *mixer) /* create the handler for the optional status interrupt endpoint */ static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer) { - struct usb_host_interface *hostif; struct usb_endpoint_descriptor *ep; void *transfer_buffer; int buffer_length; unsigned int epnum; - hostif = mixer->chip->ctrl_intf; /* we need one interrupt input endpoint */ - if (get_iface_desc(hostif)->bNumEndpoints < 1) + if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1) return 0; - ep = get_endpoint(hostif, 0); + ep = get_endpoint(mixer->hostif, 0); if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep)) return 0; @@ -2207,7 +2204,6 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, }; struct usb_mixer_interface *mixer; struct snd_info_entry *entry; - struct usb_host_interface *host_iface; int err; strcpy(chip->card->mixername, "USB Mixer"); @@ -2224,8 +2220,8 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif, return -ENOMEM; } - host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; - switch (get_iface_desc(host_iface)->bInterfaceProtocol) { + mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0]; + switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) { case UAC_VERSION_1: default: mixer->protocol = UAC_VERSION_1; diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index ae1a14d..81b2d8a 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -3,6 +3,7 @@ struct usb_mixer_interface { struct snd_usb_audio *chip; + struct usb_host_interface *hostif; struct list_head list; unsigned int ignore_ctl_error; struct urb *urb; -- cgit v0.10.2 From 88c9e42196285a7c573e2abda11a4b5037c669bc Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 2 Aug 2011 09:57:35 +0200 Subject: nfs: add missing prefetch.h include Fix this compile error on s390: CC [M] fs/nfs/blocklayout/blocklayout.o fs/nfs/blocklayout/blocklayout.c: In function 'bl_end_io_read': fs/nfs/blocklayout/blocklayout.c:201:4: error: implicit declaration of function 'prefetchw' Introduced with 9549ec01 "pnfsblock: bl_read_pagelist". 
Cc: Fred Isaman Signed-off-by: Heiko Carstens Signed-off-by: Trond Myklebust diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index e56564d..9561c8f 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -36,6 +36,7 @@ #include #include /* struct bio */ #include /* various write calls */ +#include #include "blocklayout.h" -- cgit v0.10.2 From 20618b21da0796115e81906d24ff1601552701b7 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Wed, 3 Aug 2011 21:54:33 -0700 Subject: pnfs-obj: Bug when we are running out of bio When the number of pages we want to encode is bigger than the size of the bio (which can currently happen only when all IO is going to a single device, e.g. group_width==1), the IO is submitted short and we report back only the number of bytes we actually wrote/read, and all is fine. BUT ... There was a bug: the current length counter was advanced before the failed attempt to add the extra page, so the CDB length could end up one page longer than the actual bio size, which is of course rejected by the osd-target. While here, also fix the bio size calculation for the case that we received more than one group of devices. CC: Stable Tree Signed-off-by: Boaz Harrosh Signed-off-by: Trond Myklebust diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9383ca7..aa8663a 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -589,22 +589,19 @@ static void _calc_stripe_info(struct objio_state *ios, u64 file_offset, } static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg, - unsigned pgbase, struct _objio_per_comp *per_dev, int cur_len, + unsigned pgbase, struct _objio_per_comp *per_dev, int len, gfp_t gfp_flags) { unsigned pg = *cur_pg; + int cur_len = len; struct request_queue *q = osd_request_queue(_io_od(ios, per_dev->dev)); - per_dev->length += cur_len; - if (per_dev->bio == NULL) { - unsigned stripes = ios->layout->num_comps / - ios->layout->mirrors_p1; - unsigned pages_in_stripe = stripes * + unsigned pages_in_stripe = ios->layout->group_width * (ios->layout->stripe_unit / PAGE_SIZE); unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) / - stripes; + ios->layout->group_width; if (BIO_MAX_PAGES_KMALLOC < bio_size) bio_size = BIO_MAX_PAGES_KMALLOC; @@ -632,6 +629,7 @@ static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg, } BUG_ON(cur_len); + per_dev->length += len; *cur_pg = pg; return 0; } -- cgit v0.10.2 From 9af7db3228acc286c50e3a0f054ec982efdbc6c6 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Wed, 3 Aug 2011 21:52:51 -0700 Subject: pnfs-obj: Fix the comp_index != 0 case There were bugs in the case of a partial layout, where olo_comp_index is not zero. This used to work and was tested, but one of the later cleanup SQUASHMEs broke it and it was not tested since. Also add a dprint that specifies the received layout parameters. Everything else was already printed.
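The indexing rule the patch below restores, as a hedged sketch: with a partial layout, the comps[] credentials and the per-component state are indexed relative to the layout's first component, so absolute device numbers must be rebased first (the names follow the diff):

/* Sketch only: rebase the absolute device number before indexing. */
unsigned cur_comp = dev - first_dev;
struct pnfs_osd_object_cred *cred = &ios->layout->comps[cur_comp];
struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];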
[Needed in v3.0] CC: Stable Tree Signed-off-by: Boaz Harrosh Signed-off-by: Trond Myklebust diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index aa8663a..d0cda12 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -479,7 +479,6 @@ static int _io_check(struct objio_state *ios, bool is_write) for (i = 0; i < ios->numdevs; i++) { struct osd_sense_info osi; struct osd_request *or = ios->per_dev[i].or; - unsigned dev; int ret; if (!or) @@ -500,9 +499,8 @@ static int _io_check(struct objio_state *ios, bool is_write) continue; /* we recovered */ } - dev = ios->per_dev[i].dev; - objlayout_io_set_result(&ios->ol_state, dev, - &ios->layout->comps[dev].oc_object_id, + objlayout_io_set_result(&ios->ol_state, i, + &ios->layout->comps[i].oc_object_id, osd_pri_2_pnfs_err(osi.osd_err_pri), ios->per_dev[i].offset, ios->per_dev[i].length, @@ -648,7 +646,7 @@ static int _prepare_one_group(struct objio_state *ios, u64 length, int ret = 0; while (length) { - struct _objio_per_comp *per_dev = &ios->per_dev[dev]; + struct _objio_per_comp *per_dev = &ios->per_dev[dev - first_dev]; unsigned cur_len, page_off = 0; if (!per_dev->length) { @@ -668,8 +666,8 @@ static int _prepare_one_group(struct objio_state *ios, u64 length, cur_len = stripe_unit; } - if (max_comp < dev) - max_comp = dev; + if (max_comp < dev - first_dev) + max_comp = dev - first_dev; } else { cur_len = stripe_unit; } @@ -804,7 +802,7 @@ static int _read_mirrors(struct objio_state *ios, unsigned cur_comp) struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp]; unsigned dev = per_dev->dev; struct pnfs_osd_object_cred *cred = - &ios->layout->comps[dev]; + &ios->layout->comps[cur_comp]; struct osd_obj_id obj = { .partition = cred->oc_object_id.oid_partition_id, .id = cred->oc_object_id.oid_object_id, @@ -902,7 +900,7 @@ static int _write_mirrors(struct objio_state *ios, unsigned cur_comp) for (; cur_comp < last_comp; ++cur_comp, ++dev) { struct osd_request *or = NULL; struct pnfs_osd_object_cred *cred = - &ios->layout->comps[dev]; + &ios->layout->comps[cur_comp]; struct osd_obj_id obj = { .partition = cred->oc_object_id.oid_partition_id, .id = cred->oc_object_id.oid_object_id, diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c index 16fc758..b3918f7 100644 --- a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c +++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c @@ -170,6 +170,9 @@ int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, p = _osd_xdr_decode_data_map(p, &layout->olo_map); layout->olo_comps_index = be32_to_cpup(p++); layout->olo_num_comps = be32_to_cpup(p++); + dprintk("%s: olo_comps_index=%d olo_num_comps=%d\n", __func__, + layout->olo_comps_index, layout->olo_num_comps); + iter->total_comps = layout->olo_num_comps; return 0; } -- cgit v0.10.2 From 55a673990ec04cf63005318bcf08c2b0046e5778 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 2 Aug 2011 14:46:29 -0400 Subject: NFSv4.1: Fix the callback 'highest_used_slotid' behaviour Currently, there is no guarantee that we will call nfs4_cb_take_slot() even though nfs4_callback_compound() will consistently call nfs4_cb_free_slot() provided the cb_process_state has set the 'clp' field. The result is that we can trigger the BUG_ON() upon the next call to nfs4_cb_take_slot(). This patch fixes the above problem by using the slot id that was taken in the CB_SEQUENCE operation as a flag for whether or not we need to call nfs4_cb_free_slot(). 
It also fixes an atomicity problem: we need to set tbl->highest_used_slotid atomically with the check for NFS4_SESSION_DRAINING, otherwise we end up racing with the various tests in nfs4_begin_drain_session(). Cc: stable@kernel.org [2.6.38+] Signed-off-by: Trond Myklebust diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index b257383..07df5f1 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -38,6 +38,7 @@ enum nfs4_callback_opnum { struct cb_process_state { __be32 drc_status; struct nfs_client *clp; + int slotid; }; struct cb_compound_hdr_arg { @@ -166,7 +167,6 @@ extern unsigned nfs4_callback_layoutrecall( void *dummy, struct cb_process_state *cps); extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); -extern void nfs4_cb_take_slot(struct nfs_client *clp); struct cb_devicenotifyitem { uint32_t cbd_notify_type; diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 74780f9..0ab8202 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -348,7 +348,7 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) /* Normal */ if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { slot->seq_nr++; - return htonl(NFS4_OK); + goto out_ok; } /* Replay */ @@ -367,11 +367,14 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) /* Wraparound */ if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) { slot->seq_nr = 1; - return htonl(NFS4_OK); + goto out_ok; } /* Misordered request */ return htonl(NFS4ERR_SEQ_MISORDERED); +out_ok: + tbl->highest_used_slotid = args->csa_slotid; + return htonl(NFS4_OK); } /* @@ -433,26 +436,32 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, struct cb_sequenceres *res, struct cb_process_state *cps) { + struct nfs4_slot_table *tbl; struct nfs_client *clp; int i; __be32 status = htonl(NFS4ERR_BADSESSION); - cps->clp = NULL; - clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid); if (clp == NULL) goto out; + tbl = &clp->cl_session->bc_slot_table; + + spin_lock(&tbl->slot_tbl_lock); /* state manager is resetting the session */ if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { - status = NFS4ERR_DELAY; + spin_unlock(&tbl->slot_tbl_lock); + status = htonl(NFS4ERR_DELAY); goto out; } status = validate_seqid(&clp->cl_session->bc_slot_table, args); + spin_unlock(&tbl->slot_tbl_lock); if (status) goto out; + cps->slotid = args->csa_slotid; + /* * Check for pending referring calls. If a match is found, a * related callback was received before the response to the original @@ -469,7 +478,6 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, res->csr_slotid = args->csa_slotid; res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; - nfs4_cb_take_slot(clp); out: cps->clp = clp; /* put in nfs4_callback_compound */ diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index c6c86a7..918ad64 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -754,26 +754,15 @@ static void nfs4_callback_free_slot(struct nfs4_session *session) * Let the state manager know callback processing done. 
* A single slot, so highest used slotid is either 0 or -1 */ - tbl->highest_used_slotid--; + tbl->highest_used_slotid = -1; nfs4_check_drain_bc_complete(session); spin_unlock(&tbl->slot_tbl_lock); } -static void nfs4_cb_free_slot(struct nfs_client *clp) +static void nfs4_cb_free_slot(struct cb_process_state *cps) { - if (clp && clp->cl_session) - nfs4_callback_free_slot(clp->cl_session); -} - -/* A single slot, so highest used slotid is either 0 or -1 */ -void nfs4_cb_take_slot(struct nfs_client *clp) -{ - struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table; - - spin_lock(&tbl->slot_tbl_lock); - tbl->highest_used_slotid++; - BUG_ON(tbl->highest_used_slotid != 0); - spin_unlock(&tbl->slot_tbl_lock); + if (cps->slotid != -1) + nfs4_callback_free_slot(cps->clp->cl_session); } #else /* CONFIG_NFS_V4_1 */ @@ -784,7 +773,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) return htonl(NFS4ERR_MINOR_VERS_MISMATCH); } -static void nfs4_cb_free_slot(struct nfs_client *clp) +static void nfs4_cb_free_slot(struct cb_process_state *cps) { } #endif /* CONFIG_NFS_V4_1 */ @@ -866,6 +855,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r struct cb_process_state cps = { .drc_status = 0, .clp = NULL, + .slotid = -1, }; unsigned int nops = 0; @@ -906,7 +896,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r *hdr_res.status = status; *hdr_res.nops = htonl(nops); - nfs4_cb_free_slot(cps.clp); + nfs4_cb_free_slot(&cps); nfs_put_client(cps.clp); dprintk("%s: done, status = %u\n", __func__, ntohl(status)); return rpc_success; -- cgit v0.10.2 From 910ac68a2b80c7de95bc8488734067b1bb15d583 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 2 Aug 2011 14:46:52 -0400 Subject: NFSv4.1: Return NFS4ERR_BADSESSION to callbacks during session resets If the client is in the process of resetting the session when it receives a callback, then returning NFS4ERR_DELAY may cause a deadlock with the DESTROY_SESSION call. Basically, if the client returns NFS4ERR_DELAY in response to the CB_SEQUENCE call, then the server is entitled to believe that the client is busy because it is already processing that call. In that case, the server is perfectly entitled to respond with a NFS4ERR_BACK_CHAN_BUSY to any DESTROY_SESSION call. Fix this by having the client reply with a NFS4ERR_BADSESSION in response to the callback if it is resetting the session. Cc: stable@kernel.org [2.6.38+] Signed-off-by: Trond Myklebust diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 0ab8202..43926ad 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -452,6 +452,11 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { spin_unlock(&tbl->slot_tbl_lock); status = htonl(NFS4ERR_DELAY); + /* Return NFS4ERR_BADSESSION if we're draining the session + * in order to reset it. + */ + if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) + status = htonl(NFS4ERR_BADSESSION); goto out; } -- cgit v0.10.2 From 1bdfac19b3ecfca545281c15c7aea7ebc2eaef31 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 3 Aug 2011 09:31:49 -0400 Subject: x86-64: Pad vDSO to a page boundary This avoids an information leak to userspace. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/a63380a3c58a0506a2f5a18ba1b12dbde1f25e58.1312378163.git.luto@mit.edu Signed-off-by: H. 
Peter Anvin diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S index 1b979c1..01f5e3b 100644 --- a/arch/x86/vdso/vdso.S +++ b/arch/x86/vdso/vdso.S @@ -9,6 +9,7 @@ __PAGE_ALIGNED_DATA vdso_start: .incbin "arch/x86/vdso/vdso.so" vdso_end: + .align PAGE_SIZE /* extra data here leaks to userspace. */ .previous -- cgit v0.10.2 From 9c40818da5b39fca236029059ab839857b1ef56c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 3 Aug 2011 09:31:50 -0400 Subject: x86-64: Move the "user" vsyscall segment out of the data segment. The kernel's loader doesn't seem to care, but gold complains. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/f0716870c297242a841b949953d80c0d87bf3d3f.1312378163.git.luto@mit.edu Reported-by: Arkadiusz Miskiewicz Signed-off-by: H. Peter Anvin diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 4aa9c54..e79fb39 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -154,6 +154,24 @@ SECTIONS #ifdef CONFIG_X86_64 + . = ALIGN(PAGE_SIZE); + __vvar_page = .; + + .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { + + /* Place all vvars at the offsets in asm/vvar.h. */ +#define EMIT_VVAR(name, offset) \ + . = offset; \ + *(.vvar_ ## name) +#define __VVAR_KERNEL_LDS +#include +#undef __VVAR_KERNEL_LDS +#undef EMIT_VVAR + + } :data + + . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); + #define VSYSCALL_ADDR (-10*1024*1024) #define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET) @@ -162,7 +180,6 @@ SECTIONS #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) - . = ALIGN(4096); __vsyscall_0 = .; . = VSYSCALL_ADDR; @@ -185,23 +202,6 @@ SECTIONS #undef VVIRT_OFFSET #undef VVIRT - __vvar_page = .; - - .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { - - /* Place all vvars at the offsets in asm/vvar.h. */ -#define EMIT_VVAR(name, offset) \ - . = offset; \ - *(.vvar_ ## name) -#define __VVAR_KERNEL_LDS -#include -#undef __VVAR_KERNEL_LDS -#undef EMIT_VVAR - - } :data - - . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); - #endif /* CONFIG_X86_64 */ /* Init code and data - will be freed after init */ -- cgit v0.10.2 From f670bb760e7d32ec9c690e748a1d5d04921363ab Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 3 Aug 2011 09:31:51 -0400 Subject: x86-64: Work around gold bug 13023 Gold has trouble assigning numbers to the location counter inside of an output section description. The bug was triggered by 9fd67b4ed0714ab718f1f9bd14c344af336a6df7, which consolidated all of the vsyscall sections into a single section. The workaround is IMO still nicer than the old way of doing it. This produces an apparently valid kernel image and passes my vdso tests on both GNU ld version 2.21.51.0.6-2.fc15 20110118 and GNU gold (version 2.21.51.0.6-2.fc15 20110118) 1.10 as distributed by Fedora 15. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/0b260cb806f1f9a25c00ce8377a5f035d57f557a.1312378163.git.luto@mit.edu Reported-by: Arkadiusz Miskiewicz Signed-off-by: H. Peter Anvin diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index e79fb39..8f3a265 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -158,10 +158,12 @@ SECTIONS __vvar_page = .; .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) { + /* work around gold bug 13023 */ + __vvar_beginning_hack = .; - /* Place all vvars at the offsets in asm/vvar.h. */ -#define EMIT_VVAR(name, offset) \ - . = offset; \ + /* Place all vvars at the offsets in asm/vvar.h. 
*/ +#define EMIT_VVAR(name, offset) \ + . = __vvar_beginning_hack + offset; \ *(.vvar_ ## name) #define __VVAR_KERNEL_LDS #include @@ -184,15 +186,17 @@ SECTIONS . = VSYSCALL_ADDR; .vsyscall : AT(VLOAD(.vsyscall)) { + /* work around gold bug 13023 */ + __vsyscall_beginning_hack = .; *(.vsyscall_0) - . = 1024; + . = __vsyscall_beginning_hack + 1024; *(.vsyscall_1) - . = 2048; + . = __vsyscall_beginning_hack + 2048; *(.vsyscall_2) - . = 4096; /* Pad the whole page. */ + . = __vsyscall_beginning_hack + 4096; /* Pad the whole page. */ } :user =0xcc . = ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE); -- cgit v0.10.2 From 5d5791af4c0d4fd32093882357506355c3357503 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 3 Aug 2011 09:31:52 -0400 Subject: x86-64, xen: Enable the vvar mapping Xen needs to handle VVAR_PAGE, introduced in git commit: 9fd67b4ed0714ab718f1f9bd14c344af336a6df7 x86-64: Give vvars their own page Otherwise we die during bootup with a message like: (XEN) mm.c:940:d10 Error getting mfn 1888 (pfn 1e3e48) from L1 entry 8000000001888465 for l1e_owner=10, pg_owner=10 (XEN) mm.c:5049:d10 ptwr_emulate: could not get_page_from_l1e() [ 0.000000] BUG: unable to handle kernel NULL pointer dereference at (null) [ 0.000000] IP: [] xen_set_pte+0x20/0xe0 Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/4659478ed2f3480938f96491c2ecbe2b2e113a23.1312378163.git.luto@mit.edu Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: H. Peter Anvin diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 0ccccb6..2e78619 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1829,6 +1829,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) # endif #else case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: + case VVAR_PAGE: #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: @@ -1869,7 +1870,8 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #ifdef CONFIG_X86_64 /* Replicate changes to map the vsyscall page into the user pagetable vsyscall mapping. */ - if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { + if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) || + idx == VVAR_PAGE) { unsigned long vaddr = __fix_to_virt(idx); set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); } -- cgit v0.10.2 From 318f5a2a672152328c9fb4dead504b89ec738a43 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 3 Aug 2011 09:31:53 -0400 Subject: x86-64: Add user_64bit_mode paravirt op Three places in the kernel assume that the only long mode CPL 3 selector is __USER_CS. This is not true on Xen -- Xen's sysretq changes cs to the magic value 0xe033. Two of the places are corner cases, but as of "x86-64: Improve vsyscall emulation CS and RIP handling" (c9712944b2a12373cb6ff8059afcfb7e826a6c54), vsyscalls will segfault if called with Xen's extra CS selector. This causes a panic when older init builds die. It seems impossible to make Xen use __USER_CS reliably without taking a performance hit on every system call, so this fixes the tests instead with a new paravirt op. It's a little ugly because ptrace.h can't include paravirt.h. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/f4fcb3947340d9e96ce1054a432f183f9da9db83.1312378163.git.luto@mit.edu Reported-by: Konrad Rzeszutek Wilk Signed-off-by: H. 
Peter Anvin diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index 7b439d9..41935fa 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -27,8 +27,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in desc->base2 = (info->base_addr & 0xff000000) >> 24; /* - * Don't allow setting of the lm bit. It is useless anyway - * because 64bit system calls require __USER_CS: + * Don't allow setting of the lm bit. It would confuse + * user_64bit_mode and would get overridden by sysret anyway. */ desc->l = 0; } diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 8288509..96a0f80 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -41,6 +41,7 @@ #include #include +#include struct page; struct thread_struct; @@ -63,6 +64,11 @@ struct paravirt_callee_save { struct pv_info { unsigned int kernel_rpl; int shared_kernel_pmd; + +#ifdef CONFIG_X86_64 + u16 extra_user_64bit_cs; /* __USER_CS if none */ +#endif + int paravirt_enabled; const char *name; }; diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 94e7618..3566454 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -131,6 +131,9 @@ struct pt_regs { #ifdef __KERNEL__ #include +#ifdef CONFIG_PARAVIRT +#include +#endif struct cpuinfo_x86; struct task_struct; @@ -187,6 +190,22 @@ static inline int v8086_mode(struct pt_regs *regs) #endif } +#ifdef CONFIG_X86_64 +static inline bool user_64bit_mode(struct pt_regs *regs) +{ +#ifndef CONFIG_PARAVIRT + /* + * On non-paravirt systems, this is the only long mode CPL 3 + * selector. We do not allow long mode selectors in the LDT. + */ + return regs->cs == __USER_CS; +#else + /* Headers are too twisted for this to go in paravirt.h. */ + return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs; +#endif +} +#endif + /* * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode * when it traps. The previous stack will be directly underneath the saved diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 869e1ae..681f159 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -299,6 +299,10 @@ struct pv_info pv_info = { .paravirt_enabled = 0, .kernel_rpl = 0, .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ + +#ifdef CONFIG_X86_64 + .extra_user_64bit_cs = __USER_CS, +#endif }; struct pv_init_ops pv_init_ops = { diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c index 7977f0c..c346d11 100644 --- a/arch/x86/kernel/step.c +++ b/arch/x86/kernel/step.c @@ -74,7 +74,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs) #ifdef CONFIG_X86_64 case 0x40 ... 0x4f: - if (regs->cs != __USER_CS) + if (!user_64bit_mode(regs)) /* 32-bit mode: register increment */ return 0; /* 64-bit mode: REX prefix */ diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index dda7dff..1725930 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -127,11 +127,7 @@ void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code) local_irq_enable(); - /* - * Real 64-bit user mode code has cs == __USER_CS. Anything else - * is bogus. 
- */ - if (regs->cs != __USER_CS) { + if (!user_64bit_mode(regs)) { /* * If we trapped from kernel mode, we might as well OOPS now * instead of returning to some random address and OOPSing diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2dbf6bf..c1d0182 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -105,7 +105,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, * but for now it's good enough to assume that long * mode only uses well known segments or kernel. */ - return (!user_mode(regs)) || (regs->cs == __USER_CS); + return (!user_mode(regs) || user_64bit_mode(regs)); #endif case 0x60: /* 0x64 thru 0x67 are valid prefixes in all modes. */ diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 5525163..78fe33d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -937,6 +937,10 @@ static const struct pv_info xen_info __initconst = { .paravirt_enabled = 1, .shared_kernel_pmd = 0, +#ifdef CONFIG_X86_64 + .extra_user_64bit_cs = FLAT_USER_CS64, +#endif + .name = "Xen", }; -- cgit v0.10.2 From c149a665ac488e0dac22a42287f45ad1bda06ff1 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 3 Aug 2011 09:31:54 -0400 Subject: x86-64: Add vsyscall:emulate_vsyscall trace event Vsyscall emulation is slow, so make it easy to track down. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/cdaad7da946a80b200df16647c1700db3e1171e9.1312378163.git.luto@mit.edu Signed-off-by: H. Peter Anvin diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 1725930..93a0d46 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -50,6 +50,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include "vsyscall_trace.h" + DEFINE_VVAR(int, vgetcpu_mode); DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = { @@ -146,6 +149,9 @@ void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code) * and int 0xcc is two bytes long. */ vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2); + + trace_emulate_vsyscall(vsyscall_nr); + if (vsyscall_nr < 0) { warn_bad_vsyscall(KERN_WARNING, regs, "illegal int 0xcc (exploit attempt?)"); diff --git a/arch/x86/kernel/vsyscall_trace.h b/arch/x86/kernel/vsyscall_trace.h new file mode 100644 index 0000000..a8b2ede --- /dev/null +++ b/arch/x86/kernel/vsyscall_trace.h @@ -0,0 +1,29 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM vsyscall + +#if !defined(__VSYSCALL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define __VSYSCALL_TRACE_H + +#include + +TRACE_EVENT(emulate_vsyscall, + + TP_PROTO(int nr), + + TP_ARGS(nr), + + TP_STRUCT__entry(__field(int, nr)), + + TP_fast_assign( + __entry->nr = nr; + ), + + TP_printk("nr = %d", __entry->nr) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/x86/kernel +#define TRACE_INCLUDE_FILE vsyscall_trace +#include -- cgit v0.10.2 From 326ed6a9bcf8d451a6d714d10c8b0f40941a3ed3 Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Mon, 25 Jul 2011 11:02:11 +0000 Subject: powerpc: mtspr/mtmsr should take an unsigned long Add a cast in case the caller passes in a different type, as it would if mtspr/mtmsr were functions. Previously, if a 64-bit type was passed in on 32-bit, GCC would bind the constraint to a pair of registers, and would substitute the first register in the pair in the asm code. This corresponds to the upper half of the 64-bit register, which is generally not the desired behavior. 
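For illustration (not part of the patch), a minimal sketch of the register-pair hazard the cast avoids; the macro and function names here are hypothetical:

    /*
     * Hypothetical sketch, not from the patch. On 32-bit big-endian
     * powerpc, binding a 64-bit value to an "r" constraint allocates a
     * register pair, and %0 substitutes the first register of the pair,
     * i.e. the upper 32 bits.
     */
    #include <linux/stringify.h>
    #include <linux/types.h>

    /* Buggy: if v is a u64 on ppc32, %0 names the high half of the pair. */
    #define mtspr_unsafe(rn, v) \
            asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v) : "memory")

    /* Fixed: the cast narrows v to one GPR before the constraint binds. */
    #define mtspr_safe(rn, v) \
            asm volatile("mtspr " __stringify(rn) ",%0" : \
                         : "r" ((unsigned long)(v)) : "memory")

    static void example(u64 val)
    {
            mtspr_safe(272, val);   /* writes the low 32 bits, as intended */
    }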
Signed-off-by: Scott Wood Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index e8aaf6f..9561e60 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1024,13 +1024,16 @@ #define mtmsrd(v) __mtmsrd((v), 0) #define mtmsr(v) mtmsrd(v) #else -#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory") +#define mtmsr(v) asm volatile("mtmsr %0" : \ + : "r" ((unsigned long)(v)) \ + : "memory") #endif #define mfspr(rn) ({unsigned long rval; \ asm volatile("mfspr %0," __stringify(rn) \ : "=r" (rval)); rval;}) -#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v)\ +#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : \ + : "r" ((unsigned long)(v)) \ : "memory") #ifdef __powerpc64__ -- cgit v0.10.2 From 26ee97672eaee9725bd7d66c3964579c4af7037d Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Mon, 25 Jul 2011 11:04:36 +0000 Subject: powerpc: Return the_cpu_ spec from identify_cpu Commit af9eef3c7b1ed004c378c89b87642f4937337d50 caused cpu_setup to see the_cpu_spec, rather than the source struct. However, on 32-bit, the return value of identify_cpu was being used for feature fixups, and identify_cpu was returning the source struct. So if cpu_setup patches the feature bits, the update won't affect the fixups. Signed-off-by: Scott Wood Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 9fb9332..fa44ff5 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = { static struct cpu_spec the_cpu_spec; -static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) +static struct cpu_spec * __init setup_cpu_spec(unsigned long offset, + struct cpu_spec *s) { struct cpu_spec *t = &the_cpu_spec; struct cpu_spec old; @@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) t->cpu_setup(offset, t); } #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ + + return t; } struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) @@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr) s = PTRRELOC(s); for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) { - if ((pvr & s->pvr_mask) == s->pvr_value) { - setup_cpu_spec(offset, s); - return s; - } + if ((pvr & s->pvr_mask) == s->pvr_value) + return setup_cpu_spec(offset, s); } BUG(); -- cgit v0.10.2 From 966728dd88b4026ec58fee169ccceaeaf56ef120 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 25 Jul 2011 20:47:07 +0000 Subject: powerpc: Fix device tree claim code I have a box that fails in OF during boot with: DEFAULT CATCH!, exception-handler=fff00400 at %SRR0: 49424d2c4c6f6768 %SRR1: 800000004000b002 ie "IBM,Logh". OF got corrupted with a device tree string. Looking at make_room and alloc_up, we claim the first chunk (1 MB) but we never claim any more. mem_end is always set to alloc_top which is the top of our available address space, guaranteeing we will never call alloc_up and claim more memory. Also alloc_up wasn't setting alloc_bottom to the bottom of the available address space. This doesn't help the box to boot, but we at least fail with an obvious error. We could relocate the device tree in a future patch. 
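For illustration (not part of the patch), a condensed model of the corrected accounting; claim() stands in for the firmware claim call and the address constants are assumptions:

    static unsigned long alloc_bottom = 0x100000;   /* assumed 1MB start */
    static unsigned long alloc_top = 0x2000000;     /* assumed RMO limit */

    static unsigned long claim(unsigned long addr, unsigned long size)
    {
            return (addr + size <= alloc_top) ? addr : 0;
    }

    static unsigned long alloc_up(unsigned long size, unsigned long align)
    {
            unsigned long addr = claim(alloc_bottom, size);

            if (addr == 0)
                    return 0;
            alloc_bottom = addr + size;  /* the fix: advance past the chunk */
            return addr;
    }

    /*
     * make_room() must then cap the usable window at the end of the chunk
     * it just claimed, so that overflowing it triggers another claim:
     *
     *         *mem_end = chunk + room;  // was: *mem_end = RELOC(alloc_top)
     */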
Signed-off-by: Anton Blanchard Cc: Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index c016033..3b22142 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1020,7 +1020,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) } if (addr == 0) return 0; - RELOC(alloc_bottom) = addr; + RELOC(alloc_bottom) = addr + size; prom_debug(" -> %x\n", addr); prom_debug(" alloc_bottom : %x\n", RELOC(alloc_bottom)); @@ -1834,7 +1834,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, chunk = alloc_up(room, 0); if (chunk == 0) prom_panic("No memory for flatten_device_tree (claim failed)"); - *mem_end = RELOC(alloc_top); + *mem_end = chunk + room; } ret = (void *)*mem_start; @@ -2053,7 +2053,7 @@ static void __init flatten_device_tree(void) mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); if (mem_start == 0) prom_panic("Can't allocate initial device-tree chunk\n"); - mem_end = RELOC(alloc_top); + mem_end = mem_start + room; /* Get root of tree */ root = call_prom("peer", 1, 1, (phandle)0); -- cgit v0.10.2 From fbafd728151ccc8665584bde78deb03dbb9ef055 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 25 Jul 2011 20:47:51 +0000 Subject: powerpc: Clean up some panic messages in prom_init Add a newline to the panic messages in make_room. Also fix a comment that suggested our chunk size is 4Mb. It's 1MB. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 3b22142..a909f4e 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -1830,10 +1830,12 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, if (room > DEVTREE_CHUNK_SIZE) room = DEVTREE_CHUNK_SIZE; if (room < PAGE_SIZE) - prom_panic("No memory for flatten_device_tree (no room)"); + prom_panic("No memory for flatten_device_tree " + "(no room)\n"); chunk = alloc_up(room, 0); if (chunk == 0) - prom_panic("No memory for flatten_device_tree (claim failed)"); + prom_panic("No memory for flatten_device_tree " + "(claim failed)\n"); *mem_end = chunk + room; } @@ -2042,7 +2044,7 @@ static void __init flatten_device_tree(void) /* * Check how much room we have between alloc top & bottom (+/- a - * few pages), crop to 4Mb, as this is our "chuck" size + * few pages), crop to 1MB, as this is our "chunk" size */ room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000; if (room > DEVTREE_CHUNK_SIZE) -- cgit v0.10.2 From c113a3aee2b68e311f2bc55f70fe56b64c3a476b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 26 Jul 2011 14:35:46 +0000 Subject: powerpc: Jump label misalignment causes oops at boot I hit an oops at boot on the first instruction of timer_cpu_notify: NIP [c000000000722f88] .timer_cpu_notify+0x0/0x388 The code should look like: c000000000722f78: eb e9 00 30 ld r31,48(r9) c000000000722f7c: 2f bf 00 00 cmpdi cr7,r31,0 c000000000722f80: 40 9e ff 44 bne+ cr7,c000000000722ec4 c000000000722f84: 4b ff ff 74 b c000000000722ef8 c000000000722f88 <.timer_cpu_notify>: c000000000722f88: 7c 08 02 a6 mflr r0 c000000000722f8c: 2f a4 00 07 cmpdi cr7,r4,7 c000000000722f90: fb c1 ff f0 std r30,-16(r1) c000000000722f94: fb 61 ff d8 std r27,-40(r1) But the oops output shows: eb61ffd8 eb81ffe0 eba1ffe8 ebc1fff0 7c0803a6 ebe1fff8 4e800020 00000000 ebe90030 c0000000 00ad0a28 00000000 2fa40007 fbc1fff0 fb61ffd8 So we scribbled over our 
instructions with c000000000ad0a28, which is an address inside the jump_table ELF section. It turns out the jump_table section is only aligned to 8 bytes but we are aligning our entries within the section to 16 bytes. This means our entries are offset from the table: c000000000acd4a8 <__start___jump_table>: ... c000000000ad0a10: c0 00 00 00 lfs f0,0(0) c000000000ad0a14: 00 70 cd 5c .long 0x70cd5c c000000000ad0a18: c0 00 00 00 lfs f0,0(0) c000000000ad0a1c: 00 70 cd 90 .long 0x70cd90 c000000000ad0a20: c0 00 00 00 lfs f0,0(0) c000000000ad0a24: 00 ac a4 20 .long 0xaca420 And the jump table sort code gets very confused and writes into the wrong spot. Remove the alignment, and also remove the padding since it saves some space and we shouldn't need it. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 1f780b9..938986e 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -22,7 +22,6 @@ static __always_inline bool arch_static_branch(struct jump_label_key *key) asm goto("1:\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" - ".align 4\n\t" JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" ".popsection \n\t" : : "i" (key) : : l_yes); @@ -41,7 +40,6 @@ struct jump_entry { jump_label_t code; jump_label_t target; jump_label_t key; - jump_label_t pad; }; #endif /* _ASM_POWERPC_JUMP_LABEL_H */ -- cgit v0.10.2 From bed9a31527af8ff3dfbad62a1a42815cef4baab7 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Tue, 26 Jul 2011 18:15:03 +0000 Subject: powerpc: pseries: Fix kexec on machines with more than 4TB of RAM On a box with 8TB of RAM the MMU hashtable is 64GB in size. That means we have 4G PTEs. pSeries_lpar_hptab_clear was using a signed int to store the index which will overflow at 2G. Signed-off-by: Anton Blanchard Cc: Acked-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index f7205d3..225aecf 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -204,7 +204,7 @@ static void pSeries_lpar_hptab_clear(void) unsigned long ptel; } ptes[4]; long lpar_rc; - int i, j; + unsigned long i, j; /* Read in batches of 4, * invalidate only valid entries not in the VRMA -- cgit v0.10.2 From 501d238633a3f9869f4e777b3b281ca7660b7156 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 Jul 2011 07:27:21 +0000 Subject: ppc: Remove duplicate definition of PV_POWER7 One definition of PV_POWER7 seems enough to me.
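One reason the duplicate lingered: redefining an object-like macro with an identical replacement list is legal C and draws no compiler diagnostic, so nothing flagged it. For example:

    #define PV_POWER7 0x003F
    #define PV_POWER7 0x003F  /* identical redefinition: accepted silently */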
Signed-off-by: Peter Zijlstra Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 9561e60..559da19 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1003,7 +1003,6 @@ #define PV_970 0x0039 #define PV_POWER5 0x003A #define PV_POWER5p 0x003B -#define PV_POWER7 0x003F #define PV_970FX 0x003C #define PV_POWER6 0x003E #define PV_POWER7 0x003F -- cgit v0.10.2 From b59a1bfcc2406ea75346977ad016cfe909a762ac Mon Sep 17 00:00:00 2001 From: David Ahern Date: Sat, 30 Jul 2011 10:53:20 +0000 Subject: powerpc/perf: Disable pagefaults during callchain stack read Panic observed on an older kernel when collecting call chains for the context-switch software event: []rb_erase+0x1b4/0x3e8 []__dequeue_entity+0x50/0xe8 []set_next_entity+0x178/0x1bc []pick_next_task_fair+0xb0/0x118 []schedule+0x500/0x614 []rwsem_down_failed_common+0xf0/0x264 []rwsem_down_read_failed+0x34/0x54 []down_read+0x3c/0x54 []do_page_fault+0x114/0x5e8 []handle_page_fault+0xc/0x80 []perf_callchain+0x224/0x31c []perf_prepare_sample+0x240/0x2fc []__perf_event_overflow+0x280/0x398 []perf_swevent_overflow+0x9c/0x10c []perf_swevent_ctx_event+0x1d0/0x230 []do_perf_sw_event+0x84/0xe4 []perf_sw_event_context_switch+0x150/0x1b4 []perf_event_task_sched_out+0x44/0x2d4 []schedule+0x2c0/0x614 []__cond_resched+0x34/0x90 []_cond_resched+0x4c/0x68 []move_page_tables+0xb0/0x418 []setup_arg_pages+0x184/0x2a0 []load_elf_binary+0x394/0x1208 []search_binary_handler+0xe0/0x2c4 []do_execve+0x1bc/0x268 []sys_execve+0x84/0xc8 []ret_from_syscall+0x0/0x3c A page fault occurred walking the callchain while creating a perf sample for the context-switch event. To handle the page fault the mmap_sem is needed, but it is currently held by setup_arg_pages. (setup_arg_pages calls shift_arg_pages with the mmap_sem held. shift_arg_pages then calls move_page_tables which has a cond_resched at the top of its for loop - hitting that cond_resched is what caused the context switch.) This is an extension of Anton's proposed patch: https://lkml.org/lkml/2011/7/24/151 adding case for 32-bit ppc. Tested on the system that first generated the panic and then again with latest kernel using a PPC VM. I am not able to test the 64-bit path - I do not have H/W for it and 64-bit PPC VMs (qemu on Intel) is horribly slow. 
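The shape of the fix, as a condensed sketch (the helper name read_user_word() is hypothetical; read_user_stack_slow() is from the patched file):

    static int read_user_word(unsigned long __user *ptr, unsigned long *ret)
    {
            int rc;

            pagefault_disable();    /* a fault now fails fast ... */
            rc = __get_user_inatomic(*ret, ptr);
            pagefault_enable();     /* ... instead of sleeping on mmap_sem */

            if (rc)
                    return read_user_stack_slow(ptr, ret, sizeof(*ret));
            return 0;
    }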
Signed-off-by: David Ahern Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index d05ae42..564c1d8 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c @@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret) ((unsigned long)ptr & 7)) return -EFAULT; - if (!__get_user_inatomic(*ret, ptr)) + pagefault_disable(); + if (!__get_user_inatomic(*ret, ptr)) { + pagefault_enable(); return 0; + } + pagefault_enable(); return read_user_stack_slow(ptr, ret, 8); } @@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) ((unsigned long)ptr & 3)) return -EFAULT; - if (!__get_user_inatomic(*ret, ptr)) + pagefault_disable(); + if (!__get_user_inatomic(*ret, ptr)) { + pagefault_enable(); return 0; + } + pagefault_enable(); return read_user_stack_slow(ptr, ret, 4); } @@ -294,11 +302,17 @@ static inline int current_is_64bit(void) */ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) { + int rc; + if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) || ((unsigned long)ptr & 3)) return -EFAULT; - return __get_user_inatomic(*ret, ptr); + pagefault_disable(); + rc = __get_user_inatomic(*ret, ptr); + pagefault_enable(); + + return rc; } static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, -- cgit v0.10.2 From 8aa6d359298ad284a202dc43f103e2f8100a6e82 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 31 Jul 2011 19:27:35 +0000 Subject: powerpc: Move kdump default base address to half RMO size on 64bit We are seeing boot failures on some very large boxes even with commit b5416ca9f824 (powerpc: Move kdump default base address to 64MB on 64bit). This patch halves the RMO so both kernels get about the same amount of RMO memory. On large machines this region will be at least 256MB, so each kernel will get 128MB. We cap it at 256MB (small SLB size) since some early allocations need to be in the bolted SLB region. We could relax this on machines with 1TB SLBs in a future patch. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h index 6857af5..bffd062 100644 --- a/arch/powerpc/include/asm/kdump.h +++ b/arch/powerpc/include/asm/kdump.h @@ -3,17 +3,7 @@ #include -/* - * If CONFIG_RELOCATABLE is enabled we can place the kdump kernel anywhere. - * To keep enough space in the RMO for the first stage kernel on 64bit, we - * place it at 64MB. If CONFIG_RELOCATABLE is not enabled we must place - * the second stage at 32MB. - */ -#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC64) -#define KDUMP_KERNELBASE 0x4000000 -#else #define KDUMP_KERNELBASE 0x2000000 -#endif /* How many bytes to reserve at zero for kdump. The reserve limit should * be greater or equal to the trampoline's end address. 
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 6658a15..9ce1672 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -136,12 +136,16 @@ void __init reserve_crashkernel(void) crashk_res.start = KDUMP_KERNELBASE; #else if (!crashk_res.start) { +#ifdef CONFIG_PPC64 /* - * unspecified address, choose a region of specified size - * can overlap with initrd (ignoring corruption when retained) - * ppc64 requires kernel and some stacks to be in first segemnt + * On 64bit we split the RMO in half but cap it at half of + * a small SLB (128MB) since the crash kernel needs to place + * itself and some stacks to be in the first segment. */ + crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2)); +#else crashk_res.start = KDUMP_KERNELBASE; +#endif } crash_base = PAGE_ALIGN(crashk_res.start); -- cgit v0.10.2 From 53876e387d962f7f37747150f33f2aa920a7b20c Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 31 Jul 2011 19:30:04 +0000 Subject: powerpc: Lack of ibm,io-events not that important! The ibm,io-events code is a bit verbose with its error messages. Reverse the reporting so we only print when we successfully enable I/O event interrupts. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c index c829e60..2c4dd1f 100644 --- a/arch/powerpc/platforms/pseries/io_event_irq.c +++ b/arch/powerpc/platforms/pseries/io_event_irq.c @@ -212,17 +212,15 @@ static int __init ioei_init(void) struct device_node *np; ioei_check_exception_token = rtas_token("check-exception"); - if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) { - pr_warning("IO Event IRQ not supported on this system !\n"); + if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) return -ENODEV; - } + np = of_find_node_by_path("/event-sources/ibm,io-events"); if (np) { request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); + pr_info("IBM I/O event interrupts enabled\n"); of_node_put(np); } else { - pr_err("io_event_irq: No ibm,io-events on system! " - "IO Event interrupt disabled.\n"); return -ENODEV; } return 0; -- cgit v0.10.2 From 2c740c5841ba69aed216fdf7180f06e693165a7b Mon Sep 17 00:00:00 2001 From: Nishanth Aravamudan Date: Wed, 3 Aug 2011 08:55:54 +0000 Subject: powerpc/kvm: Fix build errors with older toolchains On a box with gcc 4.3.2, I see errors like: arch/powerpc/kvm/book3s_hv_rmhandlers.S:1254: Error: Unrecognized opcode: stxvd2x arch/powerpc/kvm/book3s_hv_rmhandlers.S:1316: Error: Unrecognized opcode: lxvd2x Signed-off-by: Nishanth Aravamudan Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 6dd3358..de29501 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1251,7 +1251,7 @@ BEGIN_FTR_SECTION reg = 0 .rept 32 li r6,reg*16+VCPU_VSRS - stxvd2x reg,r6,r3 + STXVD2X(reg,r6,r3) reg = reg + 1 .endr FTR_SECTION_ELSE @@ -1313,7 +1313,7 @@ BEGIN_FTR_SECTION reg = 0 .rept 32 li r7,reg*16+VCPU_VSRS - lxvd2x reg,r7,r4 + LXVD2X(reg,r7,r4) reg = reg + 1 .endr FTR_SECTION_ELSE -- cgit v0.10.2 From 643ba4e3077f8d1c6b1cd5cc9ea3406198a833ff Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 4 Aug 2011 17:23:58 +0000 Subject: powerpc: Make KVM_GUEST default to n KVM_GUEST adds a 1 MB array to the kernel (kvm_tmp) which grew my kernel enough to cause it to fail to boot. 
Dynamically allocating or reducing the size of this array is a good idea, but in the meantime I think it makes sense to make KVM_GUEST default to n in order to minimise surprises. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index d0af7fb..b9ba861 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -24,7 +24,7 @@ source "arch/powerpc/platforms/wsp/Kconfig" config KVM_GUEST bool "KVM Guest support" - default y + default n ---help--- This option enables various optimizations for running under the KVM hypervisor. Overhead for the kernel when not running inside KVM should -- cgit v0.10.2 From a149507bdb78d69e0020dbb505f3c55b205b69b3 Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Wed, 20 Jul 2011 19:04:25 +0000 Subject: MAINTAINERS: change maintainership of mpc5xxx Grant intends to hand over maintainership of mpc5xxx to me. Change MPC5XXX entry in MAINTAINERS accordingly. Signed-off-by: Anatolij Gustschin Signed-off-by: Benjamin Herrenschmidt diff --git a/MAINTAINERS b/MAINTAINERS index 07cfd8d..2c1bc6e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3906,9 +3906,9 @@ F: arch/powerpc/platforms/powermac/ F: drivers/macintosh/ LINUX FOR POWERPC EMBEDDED MPC5XXX -M: Grant Likely +M: Anatolij Gustschin L: linuxppc-dev@lists.ozlabs.org -T: git git://git.secretlab.ca/git/linux-2.6.git +T: git git://git.denx.de/linux-2.6-agust.git S: Maintained F: arch/powerpc/platforms/512x/ F: arch/powerpc/platforms/52xx/ -- cgit v0.10.2 From b1301797f30370c430244979671978fc232f4533 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 25 Jul 2011 01:46:32 +0000 Subject: powerpc/pseries: Fix kexec on recent firmware versions Recent versions of firmware will fail to unmap the virtual processor area if we have a dispatch trace log registered. This causes kexec to fail. If a trace log is registered this patch unregisters it before the SLB shadow and virtual processor areas, fixing the problem. The address argument is ignored by firmware on unregister so we may as well remove it. 
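Condensed sketch of the teardown ordering the patch enforces (error handling trimmed; see the diffs below for the real code; a follow-up patch in this series drops the address arguments entirely):

    static void pseries_kexec_teardown_sketch(void)
    {
            int hwcpu = hard_smp_processor_id();

            /* 1. Trace log first: recent firmware refuses to unmap the
             *    VPA while a DTL is still registered. */
            if (get_lppaca()->dtl_enable_mask)
                    unregister_dtl(hwcpu);

            /* 2. Then the SLB shadow buffer ... */
            unregister_slb_shadow(hwcpu, __pa(get_slb_shadow()));

            /* 3. ... and finally the VPA itself. */
            unregister_vpa(hwcpu, __pa(get_lppaca()));
    }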
Signed-off-by: Anton Blanchard Cc: Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index e919007..0e86563 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -181,7 +181,7 @@ static void dtl_stop(struct dtl *dtl) lppaca_of(dtl->cpu).dtl_enable_mask = 0x0; - unregister_dtl(hwcpu, __pa(dtl->buf)); + unregister_dtl(hwcpu); } static u64 dtl_current_index(struct dtl *dtl) diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 54cf3a4..1118cb7 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -26,6 +26,17 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) /* Don't risk a hypervisor call if we're crashing */ if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { unsigned long addr; + int ret; + + if (get_lppaca()->dtl_enable_mask) { + ret = unregister_dtl(hard_smp_processor_id()); + if (ret) { + pr_err("WARNING: DTL deregistration for cpu " + "%d (hw %d) failed with %d\n", + smp_processor_id(), + hard_smp_processor_id(), ret); + } + } addr = __pa(get_slb_shadow()); if (unregister_slb_shadow(hard_smp_processor_id(), addr)) diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 4bf2120..a6921ae 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -73,9 +73,9 @@ static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa) return vpa_call(0x3, cpu, vpa); } -static inline long unregister_dtl(unsigned long cpu, unsigned long vpa) +static inline long unregister_dtl(unsigned long cpu) { - return vpa_call(0x6, cpu, vpa); + return vpa_call(0x6, cpu, 0); } static inline long register_dtl(unsigned long cpu, unsigned long vpa) -- cgit v0.10.2 From 711ef84e80ec6f937ad59c7a00490421a5c92867 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 25 Jul 2011 01:46:33 +0000 Subject: powerpc/pseries: Cleanup VPA registration and deregistration errors Make the VPA, SLB shadow and DTL registration and deregistration functions print consistent messages on error. I needed the firmware error code while chasing a kexec bug but we weren't printing it. 
Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 1118cb7..3bae8bd 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -39,17 +39,20 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) } addr = __pa(get_slb_shadow()); - if (unregister_slb_shadow(hard_smp_processor_id(), addr)) - printk("SLB shadow buffer deregistration of " - "cpu %u (hw_cpu_id %d) failed\n", + ret = unregister_slb_shadow(hard_smp_processor_id(), addr); + if (ret) { + pr_err("WARNING: SLB shadow buffer deregistration " + "for cpu %d (hw %d) failed with %d\n", smp_processor_id(), - hard_smp_processor_id()); + hard_smp_processor_id(), ret); + } addr = __pa(get_lppaca()); - if (unregister_vpa(hard_smp_processor_id(), addr)) { - printk("VPA deregistration of cpu %u (hw_cpu_id %d) " - "failed\n", smp_processor_id(), - hard_smp_processor_id()); + ret = unregister_vpa(hard_smp_processor_id(), addr); + if (ret) { + pr_err("WARNING: VPA deregistration for cpu %d " + "(hw %d) failed with %d\n", smp_processor_id(), + hard_smp_processor_id(), ret); } } } diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 225aecf..c9a29da 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -67,9 +67,8 @@ void vpa_init(int cpu) ret = register_vpa(hwcpu, addr); if (ret) { - printk(KERN_ERR "WARNING: vpa_init: VPA registration for " - "cpu %d (hw %d) of area %lx returns %ld\n", - cpu, hwcpu, addr, ret); + pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " + "%lx failed with %ld\n", cpu, hwcpu, addr, ret); return; } /* @@ -80,10 +79,9 @@ void vpa_init(int cpu) if (firmware_has_feature(FW_FEATURE_SPLPAR)) { ret = register_slb_shadow(hwcpu, addr); if (ret) - printk(KERN_ERR - "WARNING: vpa_init: SLB shadow buffer " - "registration for cpu %d (hw %d) of area %lx " - "returns %ld\n", cpu, hwcpu, addr, ret); + pr_err("WARNING: SLB shadow buffer registration for " + "cpu %d (hw %d) of area %lx failed with %ld\n", + cpu, hwcpu, addr, ret); } /* @@ -100,8 +98,9 @@ void vpa_init(int cpu) dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; ret = register_dtl(hwcpu, __pa(dtl)); if (ret) - pr_warn("DTL registration failed for cpu %d (%ld)\n", - cpu, ret); + pr_err("WARNING: DTL registration of cpu %d (hw %d) " + "failed with %ld\n", smp_processor_id(), + hwcpu, ret); lppaca_of(cpu).dtl_enable_mask = 2; } } diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index d00e529..0969fd9 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -324,8 +324,9 @@ static int alloc_dispatch_logs(void) dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); if (ret) - pr_warn("DTL registration failed for boot cpu %d (%d)\n", - smp_processor_id(), ret); + pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " + "with %d\n", smp_processor_id(), + hard_smp_processor_id(), ret); get_paca()->lppaca_ptr->dtl_enable_mask = 2; return 0; -- cgit v0.10.2 From 598c8231ab54cfcc8ea6f52882cefee98b129bd1 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 25 Jul 2011 01:46:34 +0000 Subject: powerpc/pseries: Simplify vpa deregistration functions The VPA, SLB shadow and DTL deregistration functions do not need an address, so simplify things and remove it.
Also cleanup pseries_kexec_cpu_down a bit by storing the cpu IDs in local variables. Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index bc02885..83a3ca2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -135,7 +135,7 @@ static void pseries_mach_cpu_die(void) get_lppaca()->idle = 0; if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) { - unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); + unregister_slb_shadow(hwcpu); /* * Call to start_secondary_resume() will not return. @@ -150,7 +150,7 @@ static void pseries_mach_cpu_die(void) WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE); set_cpu_current_state(cpu, CPU_STATE_OFFLINE); - unregister_slb_shadow(hwcpu, __pa(get_slb_shadow())); + unregister_slb_shadow(hwcpu); rtas_stop_self(); /* Should never get here... */ diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 3bae8bd..7d94bdc 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c @@ -25,34 +25,30 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) { /* Don't risk a hypervisor call if we're crashing */ if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { - unsigned long addr; int ret; + int cpu = smp_processor_id(); + int hwcpu = hard_smp_processor_id(); if (get_lppaca()->dtl_enable_mask) { - ret = unregister_dtl(hard_smp_processor_id()); + ret = unregister_dtl(hwcpu); if (ret) { pr_err("WARNING: DTL deregistration for cpu " "%d (hw %d) failed with %d\n", - smp_processor_id(), - hard_smp_processor_id(), ret); + cpu, hwcpu, ret); } } - addr = __pa(get_slb_shadow()); - ret = unregister_slb_shadow(hard_smp_processor_id(), addr); + ret = unregister_slb_shadow(hwcpu); if (ret) { pr_err("WARNING: SLB shadow buffer deregistration " "for cpu %d (hw %d) failed with %d\n", - smp_processor_id(), - hard_smp_processor_id(), ret); + cpu, hwcpu, ret); } - addr = __pa(get_lppaca()); - ret = unregister_vpa(hard_smp_processor_id(), addr); + ret = unregister_vpa(hwcpu); if (ret) { pr_err("WARNING: VPA deregistration for cpu %d " - "(hw %d) failed with %d\n", smp_processor_id(), - hard_smp_processor_id(), ret); + "(hw %d) failed with %d\n", cpu, hwcpu, ret); } } } diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index a6921ae..41c24c1 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h @@ -53,9 +53,9 @@ static inline long vpa_call(unsigned long flags, unsigned long cpu, return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa); } -static inline long unregister_vpa(unsigned long cpu, unsigned long vpa) +static inline long unregister_vpa(unsigned long cpu) { - return vpa_call(0x5, cpu, vpa); + return vpa_call(0x5, cpu, 0); } static inline long register_vpa(unsigned long cpu, unsigned long vpa) @@ -63,9 +63,9 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa) return vpa_call(0x1, cpu, vpa); } -static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa) +static inline long unregister_slb_shadow(unsigned long cpu) { - return vpa_call(0x7, cpu, vpa); + return vpa_call(0x7, cpu, 0); } static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa) -- cgit v0.10.2 From 883a805d617baca1c01172dc1d35e37829ffed0c 
Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 5 Aug 2011 15:59:40 +1000 Subject: powerpc/4xx: Fix build of PCI code on 405 Commit 112d1fe9f7715db423ffeec5ac1beccff6093dc4 "powerpc/4xx: Add check_link to struct ppc4xx_pciex_hwops" inadvertently broke 405 builds due to some functions being over protected by an ifdef CONFIG_44x. Move them back out. Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c index a59ba96..dbfe96b 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.c +++ b/arch/powerpc/sysdev/ppc4xx_pci.c @@ -655,8 +655,6 @@ struct ppc4xx_pciex_hwops static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops; -#ifdef CONFIG_44x - static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port, unsigned int sdr_offset, unsigned int mask, @@ -688,6 +686,7 @@ static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port) return 0; } + static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port) { printk(KERN_INFO "PCIE%d: Checking link...\n", port->index); @@ -718,6 +717,8 @@ static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port) printk(KERN_INFO "PCIE%d: No device detected.\n", port->index); } +#ifdef CONFIG_44x + /* Check various reset bits of the 440SPe PCIe core */ static int __init ppc440spe_pciex_check_reset(struct device_node *np) { -- cgit v0.10.2 From 81210c2062cf98bf625bcd487334c89b0fce5a82 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 5 Aug 2011 16:01:20 +1000 Subject: powerpc: Fix build without CONFIG_PCI Commit fea80311a939a746533a6d7e7c3183729d6a3faf "iomap: make IOPORT/PCI mapping functions conditional" Broke powerpc build without CONFIG_PCI as we would still define pci_iomap(), which overlaps with the new empty inline in the headers. Make our implementation conditional on CONFIG_PCI Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index 1577434..faca64a 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr) EXPORT_SYMBOL(ioport_map); EXPORT_SYMBOL(ioport_unmap); +#ifdef CONFIG_PCI void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) { resource_size_t start = pci_resource_start(dev, bar); @@ -143,6 +144,7 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr) return; iounmap(addr); } +#endif /* CONFIG_PCI */ EXPORT_SYMBOL(pci_iomap); EXPORT_SYMBOL(pci_iounmap); -- cgit v0.10.2 From 02651d1a9725f29e95296c37cf9a4f16e9c01bff Mon Sep 17 00:00:00 2001 From: Miller Puckette Date: Thu, 4 Aug 2011 12:25:56 -0700 Subject: ALSA: usb-audio - add quirk for Keith McMillen StringPort Signed-off-by: Miller Puckette Signed-off-by: Takashi Iwai diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index dba0b7f..4d4f865 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2417,6 +2417,12 @@ YAMAHA_DEVICE(0x7010, "UB99"), .idProduct = 0x1020, }, +/* KeithMcMillen Stringport */ +{ + USB_DEVICE(0x1f38, 0x0001), + .bInterfaceClass = USB_CLASS_AUDIO, +}, + /* Miditech devices */ { USB_DEVICE(0x4752, 0x0011), -- cgit v0.10.2 From 18b08c55a9b04c8783420fb6657599ad724459cc Mon Sep 17 00:00:00 2001 From: Deepak Saxena Date: Thu, 4 Aug 2011 23:39:58 -0700 Subject: Input: remove CLOCK_TICK_RATE from analog joystick driver The analog joystick driver is written for x86 systems. 
This patch updates it to use the PIT_TICK_RATE value instead of CLOCK_TICK_RATE as they are equivalent on x86 and we want to deprecate the latter. Signed-off-by: Deepak Saxena Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 4afe0a3..c021317 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -139,7 +139,7 @@ struct analog_port { #include #define GET_TIME(x) do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0) -#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? CLOCK_TICK_RATE / HZ : 0))) +#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0))) #define TIME_NAME (cpu_has_tsc?"TSC":"PIT") static unsigned int get_time_pit(void) { -- cgit v0.10.2 From 35ae66e0a09ab70ed588e65f26b4c725cd1656b6 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Fri, 5 Aug 2011 09:37:10 +0200 Subject: block: Make rq_affinity = 1 work as expected Commit 5757a6d76c introduced a new rq_affinity = 2 so as to make the request complete on the __make_request cpu. But it makes the old rq_affinity = 1 not work any more. The root cause is that if 'cpu' and 'req->cpu' are in the same group and cpu != req->cpu, ccpu will be the same as group_cpu, so the completion will be executed on 'cpu', not 'group_cpu'. This patch fixes the problem by simply removing group_cpu; the code is more explicit now. If ccpu == cpu, we complete on cpu, otherwise we raise_blk_irq to ccpu. Cc: Christoph Hellwig Cc: Roland Dreier Cc: Dan Williams Cc: Jens Axboe Signed-off-by: Tao Ma Reviewed-by: Shaohua Li Signed-off-by: Jens Axboe diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 475fab8..487addc 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -103,7 +103,7 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = { void __blk_complete_request(struct request *req) { - int ccpu, cpu, group_cpu = NR_CPUS; + int ccpu, cpu; struct request_queue *q = req->q; unsigned long flags; @@ -117,14 +117,12 @@ void __blk_complete_request(struct request *req) */ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) { ccpu = req->cpu; - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) { + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) ccpu = blk_cpu_to_group(ccpu); - group_cpu = blk_cpu_to_group(cpu); - } } else ccpu = cpu; - if (ccpu == cpu || ccpu == group_cpu) { + if (ccpu == cpu) { struct list_head *list; do_local: list = &__get_cpu_var(blk_cpu_done); -- cgit v0.10.2 From 4931402a9dd00b2997e95bfbb89409b2a6dbb383 Mon Sep 17 00:00:00 2001 From: Vivek Goyal Date: Fri, 5 Aug 2011 09:42:20 +0200 Subject: cfq-iosched: Add documentation about idling There are always questions about why CFQ is idling under various conditions. A recent one is Christoph asking again why we idle on REQ_NOIDLE. His assertion is that XFS is relying more and more on workqueues and he is concerned that CFQ idling on IO from every workqueue will impact XFS badly. So he suggested that I add some more documentation about CFQ idling, which can provide more clarity on the topic and also gives an opportunity to poke a hole in the theory and lead to improvements. So here is my attempt at that. Any comments are welcome.
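For illustration (not part of the patch), how a submitter would tag a one-off synchronous write so CFQ applies tree idling rather than per-queue idling to it:

    static void submit_one_off_sync_write(struct bio *bio)
    {
            /* REQ_SYNC marks it synchronous; REQ_NOIDLE says no more
             * dependent IO is expected from this context soon. */
            submit_bio(WRITE | REQ_SYNC | REQ_NOIDLE, bio);
    }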
Signed-off-by: Vivek Goyal Signed-off-by: Jens Axboe diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt index e578fee..6d670f5 100644 --- a/Documentation/block/cfq-iosched.txt +++ b/Documentation/block/cfq-iosched.txt @@ -43,3 +43,74 @@ If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches to IOPS mode and starts providing fairness in terms of number of requests dispatched. Note that this mode switching takes effect only for group scheduling. For non-cgroup users nothing should change. + +CFQ IO scheduler Idling Theory +=============================== +Idling on a queue is primarily about waiting for the next request to come +on same queue after completion of a request. In this process CFQ will not +dispatch requests from other cfq queues even if requests are pending there. + +The rationale behind idling is that it can cut down on number of seeks +on rotational media. For example, if a process is doing dependent +sequential reads (next read will come on only after completion of previous +one), then not dispatching request from other queue should help as we +did not move the disk head and kept on dispatching sequential IO from +one queue. + +CFQ has following service trees and various queues are put on these trees. + + sync-idle sync-noidle async + +All cfq queues doing synchronous sequential IO go on to sync-idle tree. +On this tree we idle on each queue individually. + +All synchronous non-sequential queues go on sync-noidle tree. Also any +request which are marked with REQ_NOIDLE go on this service tree. On this +tree we do not idle on individual queues instead idle on the whole group +of queues or the tree. So if there are 4 queues waiting for IO to dispatch +we will idle only once last queue has dispatched the IO and there is +no more IO on this service tree. + +All async writes go on async service tree. There is no idling on async +queues. + +CFQ has some optimizations for SSDs and if it detects a non-rotational +media which can support higher queue depth (multiple requests at in +flight at a time), then it cuts down on idling of individual queues and +all the queues move to sync-noidle tree and only tree idle remains. This +tree idling provides isolation with buffered write queues on async tree. + +FAQ +=== +Q1. Why to idle at all on queues marked with REQ_NOIDLE. + +A1. We only do tree idle (all queues on sync-noidle tree) on queues marked + with REQ_NOIDLE. This helps in providing isolation with all the sync-idle + queues. Otherwise in presence of many sequential readers, other + synchronous IO might not get fair share of disk. + + For example, if there are 10 sequential readers doing IO and they get + 100ms each. If a REQ_NOIDLE request comes in, it will be scheduled + roughly after 1 second. If after completion of REQ_NOIDLE request we + do not idle, and after a couple of milli seconds a another REQ_NOIDLE + request comes in, again it will be scheduled after 1second. Repeat it + and notice how a workload can lose its disk share and suffer due to + multiple sequential readers. + + fsync can generate dependent IO where bunch of data is written in the + context of fsync, and later some journaling data is written. Journaling + data comes in only after fsync has finished its IO (atleast for ext4 + that seemed to be the case). Now if one decides not to idle on fsync + thread due to REQ_NOIDLE, then next journaling write will not get + scheduled for another second. 
A process doing small fsync, will suffer badly in presence of multiple sequential readers. + + Hence doing tree idling on threads using REQ_NOIDLE flag on requests + provides isolation from multiple sequential readers and at the same + time we do not idle on individual threads. + +Q2. When to specify REQ_NOIDLE +A2. I would think whenever one is doing synchronous write and not expecting + more writes to be dispatched from same context soon, should be able + to specify REQ_NOIDLE on writes and that probably should work well for + most of the cases. -- cgit v0.10.2 From 3d56c8e6b039f2bc1d7c62d3b19e696c3129c8c0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 5 Aug 2011 12:30:12 +0200 Subject: ALSA: hdspm - Fix uninitialized compile warnings Put exception checks in the io_type switch() statements to catch possible mistakes in the future. Also this shuts up annoying compile warnings. Signed-off-by: Takashi Iwai diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 6edc67c..493e394 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -1339,6 +1339,10 @@ static u64 hdspm_calc_dds_value(struct hdspm *hdspm, u64 period) break; case MADIface: freq_const = 131072000000000ULL; + break; + default: + snd_BUG(); + return 0; } return div_u64(freq_const, period); @@ -1356,16 +1360,19 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate) switch (hdspm->io_type) { case MADIface: - n = 131072000000000ULL; /* 125 MHz */ - break; + n = 131072000000000ULL; /* 125 MHz */ + break; case MADI: case AES32: - n = 110069313433624ULL; /* 105 MHz */ - break; + n = 110069313433624ULL; /* 105 MHz */ + break; case RayDAT: case AIO: - n = 104857600000000ULL; /* 100 MHz */ - break; + n = 104857600000000ULL; /* 100 MHz */ + break; + default: + snd_BUG(); + return; } n = div_u64(n, rate); -- cgit v0.10.2 From 81c0a78b644f0e265a01d5a5f5ab397b791bad08 Mon Sep 17 00:00:00 2001 From: Wang Shaoyan Date: Fri, 5 Aug 2011 18:51:29 +0800 Subject: ALSA: hda - Fix a compile warning in patch_via.c sound/pci/hda/patch_via.c:2087: warning: 'dac' may be used uninitialized in this function Signed-off-by: Wang Shaoyan Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 84d8798..4ebfbd8 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -2084,7 +2084,7 @@ static int via_auto_create_speaker_ctls(struct hda_codec *codec) struct via_spec *spec = codec->spec; struct nid_path *path; bool check_dac; - hda_nid_t pin, dac; + hda_nid_t pin, dac = 0; int err; pin = spec->autocfg.speaker_pins[0]; -- cgit v0.10.2 From e1b96ada659431669efaf3defa997abf5db68130 Mon Sep 17 00:00:00 2001 From: Jason Liu Date: Fri, 5 Aug 2011 23:34:32 +0800 Subject: ARM: iMX5: Don't enable DPLL if it is already enabled If the DPLL is already enabled, don't try to enable it again: a write to the DPLL control register will reset the DPLL, which will cause issues when child modules are sourced from this DPLL.
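A condensed sketch of the fixed enable path; the lock-poll details (MXC_PLL_DP_CTL_LRF, MAX_DPLL_WAIT_TRIES, the udelay) are assumptions modeled on nearby i.MX5 clock code, since the hunk is cut short here:

    static int clk_pll_enable_sketch(void __iomem *pllbase)
    {
            u32 reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
            int i = 0;

            if (reg & MXC_PLL_DP_CTL_UPEN)
                    return 0;       /* already running: do not reset it */

            __raw_writel(reg | MXC_PLL_DP_CTL_UPEN, pllbase + MXC_PLL_DP_CTL);

            /* Wait for lock */
            while (!(__raw_readl(pllbase + MXC_PLL_DP_CTL) &
                     MXC_PLL_DP_CTL_LRF)) {
                    if (++i > MAX_DPLL_WAIT_TRIES)
                            return -ETIMEDOUT;
                    udelay(1);
            }
            return 0;
    }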
Signed-off-by: Jason Liu Cc: Sascha Hauer Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c index 6b89c1b..0856482 100644 --- a/arch/arm/mach-mx5/clock-mx51-mx53.c +++ b/arch/arm/mach-mx5/clock-mx51-mx53.c @@ -271,7 +271,11 @@ static int _clk_pll_enable(struct clk *clk) int i = 0; pllbase = _get_pll_base(clk); - reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN; + reg = __raw_readl(pllbase + MXC_PLL_DP_CTL); + if (reg & MXC_PLL_DP_CTL_UPEN) + return 0; + + reg |= MXC_PLL_DP_CTL_UPEN; __raw_writel(reg, pllbase + MXC_PLL_DP_CTL); /* Wait for lock */ -- cgit v0.10.2 From 2ab1ba68aeaecd41c4b34f0eaf1d70a37367fb1a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 4 Aug 2011 14:28:36 -0400 Subject: Btrfs: force unplugs when switching from high to regular priority bios Btrfs does bio submissions from a worker thread, and each device has a list of high priority bios and regular priority bios. Synchronous writes go to the high priority thread while async writes go to the regular list. This commit brings back an explicit unplug any time we switch from high to regular priority, which makes it easier for the block layer to give us low latencies. Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 53875ae73..3c5f2fc 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -142,6 +142,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) unsigned long limit; unsigned long last_waited = 0; int force_reg = 0; + int sync_pending; struct blk_plug plug; /* @@ -229,6 +230,22 @@ loop_lock: BUG_ON(atomic_read(&cur->bi_cnt) == 0); + /* + * if we're doing the sync list, record that our + * plug has some sync requests on it + * + * If we're doing the regular list and there are + * sync requests sitting around, unplug before + * we add more + */ + if (pending_bios == &device->pending_sync_bios) { + sync_pending = 1; + } else if (sync_pending) { + blk_finish_plug(&plug); + blk_start_plug(&plug); + sync_pending = 0; + } + submit_bio(cur->bi_rw, cur); num_run++; batch_run++; -- cgit v0.10.2 From 44e51b29228cdd30bb8f54a42eb34ea0674d5f96 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Mon, 11 Jul 2011 16:40:41 -0700 Subject: OMAP2+: Kconfig: don't select PM in OMAP2PLUS_TYPICAL CONFIG_PM is no longer a user-selectable Kconfig option. Rather it is automatically enabled if either CONFIG_SUSPEND or CONFIG_PM_RUNTIME is enabled, so having a 'select PM' here is redundant when 'select PM_RUNTIME' is present.
Signed-off-by: Kevin Hilman diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 4ae6257..57b66d5 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -7,7 +7,6 @@ config ARCH_OMAP2PLUS_TYPICAL default y select AEABI select REGULATOR - select PM select PM_RUNTIME select VFP select NEON if ARCH_OMAP3 || ARCH_OMAP4 -- cgit v0.10.2 From e13d8f383985b7ab8c859c1fa327e9b3d201894e Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Sat, 9 Jul 2011 14:37:21 -0700 Subject: OMAP3+: SR: ensure pm-runtime callbacks can be invoked with IRQs disabled SmartReflex should be disabled while entering low power mode due to a) SmartReflex values are not defined for retention voltage, further b) with SmartReflex enabled, if CPU enters lower c-states, FSM will try to bump the voltage to current OPP's voltage for which it has entered c-state; hence SmartReflex needs to be disabled for MPU, CORE and IVA voltage domains in idle path before enabling auto retention voltage achievement on the device. However, since the current pm_runtime setup for SmartReflex devices are setup to allow callbacks to be invoked with interrupts enabled, calling SmartReflex enable/disable from other contexts such as idle paths where preemption is disabled causes warnings such as the following indicating of a potential race. [ 82.023895] [] (__irq_svc+0x3c/0x120) from [] (_raw_spin_unlock_irq+0x28/0x2c) [ 82.023895] [] (_raw_spin_unlock_irq+0x28/0x2c) from [] (rpm_callback+0x4c/0x68) [ 82.023956] [] (rpm_callback+0x4c/0x68) from [] (rpm_resume+0x338/0x53c) [ 82.023956] [] (rpm_resume+0x338/0x53c) from [] (__pm_runtime_resume+0x48/0x60) [ 82.023986] [] (__pm_runtime_resume+0x48/0x60) from [] (sr_enable+0xa8/0x19c) [ 82.023986] [] (sr_enable+0xa8/0x19c) from [] (omap_sr_enable+0x50/0x90) [ 82.024017] [] (omap_sr_enable+0x50/0x90) from [] (omap4_enter_sleep+0x138/0x168) Instead, we use pm_runtime_irq_safe to tell the PM core that callbacks can be invoked in interrupt disabled contexts. Acked-by: Rajendra Nayak Signed-off-by: Nishanth Menon [khilman@ti.com: minor changelog edits] Signed-off-by: Kevin Hilman diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 2ce2fb7..dc8e86a 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c @@ -860,6 +860,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); pm_runtime_enable(&pdev->dev); + pm_runtime_irq_safe(&pdev->dev); sr_info->pdev = pdev; sr_info->srid = pdev->id; -- cgit v0.10.2 From b66a4026d43d5766bcabed36106d254c727a77ec Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Tue, 7 Jun 2011 17:02:46 -0700 Subject: OMAP1: enable GENERIC_IRQ_CHIP OMAP1 needs this also since GPIO driver (common for all OMAPs) is being converted to use generic IRQ chip. Signed-off-by: Kevin Hilman diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index 6e6735f..bb8f4a6 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -13,6 +13,7 @@ config ARCH_OMAP1 bool "TI OMAP1" select CLKDEV_LOOKUP select CLKSRC_MMIO + select GENERIC_IRQ_CHIP help "Systems based on omap7xx, omap15xx or omap16xx" -- cgit v0.10.2 From 8c7f65943dfb93c2583f935f20ec39e8559d93d3 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Thu, 21 Jul 2011 11:43:48 -0700 Subject: OMAP3: beagle: don't touch omap_device internals Board code should not touch omap_device internals. 
To get the MPU/IVA devices, use existing APIs: omap2_get_mpuss_device(), omap2_get_iva_device(). Signed-off-by: Kevin Hilman diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 32f5f89..3ae16b4 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -491,23 +491,22 @@ static void __init beagle_opp_init(void) /* Custom OPP enabled for all xM versions */ if (cpu_is_omap3630()) { - struct omap_hwmod *mh = omap_hwmod_lookup("mpu"); - struct omap_hwmod *dh = omap_hwmod_lookup("iva"); - struct device *dev; + struct device *mpu_dev, *iva_dev; - if (!mh || !dh) { + mpu_dev = omap2_get_mpuss_device(); + iva_dev = omap2_get_iva_device(); + + if (!mpu_dev || !iva_dev) { pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n", - __func__, mh, dh); + __func__, mpu_dev, iva_dev); return; } /* Enable MPU 1GHz and lower opps */ - dev = &mh->od->pdev.dev; - r = opp_enable(dev, 800000000); + r = opp_enable(mpu_dev, 800000000); /* TODO: MPU 1GHz needs SR and ABB */ /* Enable IVA 800MHz and lower opps */ - dev = &dh->od->pdev.dev; - r |= opp_enable(dev, 660000000); + r |= opp_enable(iva_dev, 660000000); /* TODO: DSP 800MHz needs SR and ABB */ if (r) { pr_err("%s: failed to enable higher opp %d\n", @@ -516,10 +515,8 @@ static void __init beagle_opp_init(void) * Cleanup - disable the higher freqs - we dont care * about the results */ - dev = &mh->od->pdev.dev; - opp_disable(dev, 800000000); - dev = &dh->od->pdev.dev; - opp_disable(dev, 660000000); + opp_disable(mpu_dev, 800000000); + opp_disable(iva_dev, 660000000); } } return; -- cgit v0.10.2 From 98333b3dda1e490b02304a6a6643c294d4e520c3 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Fri, 22 Jul 2011 00:55:52 -0500 Subject: OMAP2+: PM: SmartReflex: use put_sync_suspend for IRQ-safe disabling omap_sr_disable_reset_volt() is called with irqs off in omapx_enter_sleep() as part of the idle sequence; this eventually calls sr_disable() and pm_runtime_put_sync(). pm_runtime_put_sync calls rpm_idle, which will enable interrupts in order to call the callback. In this short interval when interrupts are enabled, scenarios such as the following can occur: while interrupts are enabled, the timer interrupt that is supposed to wake the device out of idle occurs and is acked, so when the CPU finally goes to off, the timer is already gone, missing a wakeup event. Further, as the runtime PM documentation states: "However, subsystems can use the pm_runtime_irq_safe() helper function to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume() callbacks should be invoked in atomic context with interrupts disabled (->runtime_idle() is still invoked the default way)." Hence, replace pm_runtime_put_sync with pm_runtime_put_sync_suspend to invoke the suspend handler and shut off the fclk for the SmartReflex module instead of using the idle handler in interrupt disabled context.
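For illustration (not part of the patches), a minimal driver-shaped sketch of the two pieces this series combines; the function names are hypothetical:

    static int sr_like_probe(struct platform_device *pdev)
    {
            pm_runtime_enable(&pdev->dev);
            pm_runtime_irq_safe(&pdev->dev); /* callbacks may run with IRQs off */
            return 0;
    }

    static void sr_like_disable_from_idle(struct platform_device *pdev)
    {
            /* Safe with interrupts disabled: invokes ->runtime_suspend()
             * directly rather than going through ->runtime_idle(). */
            pm_runtime_put_sync_suspend(&pdev->dev);
    }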
Signed-off-by: Nishanth Menon Signed-off-by: Colin Cross [khilman@ti.com: minor Subject edits] Signed-off-by: Kevin Hilman diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index dc8e86a..34c01a7 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c @@ -621,7 +621,7 @@ void sr_disable(struct voltagedomain *voltdm) sr_v2_disable(sr); } - pm_runtime_put_sync(&sr->pdev->dev); + pm_runtime_put_sync_suspend(&sr->pdev->dev); } /** -- cgit v0.10.2 From a3ea14df0e383f44dcb2e61badb71180dbffe526 Mon Sep 17 00:00:00 2001 From: Paul Fox Date: Tue, 26 Jul 2011 16:42:26 +0100 Subject: x86, olpc: Wait for last byte of EC command to be accepted When executing EC commands, only waiting when there are still more bytes to write is usually fine. However, if the system suspends very quickly after a call to olpc_ec_cmd(), the last data byte may not yet be transferred to the EC, and the command will not complete. This solves a bug where the SCI wakeup mask was not correctly written when going into suspend. It means that sometimes, on XO-1.5 (but not XO-1), the devices that were marked as wakeup sources can't wake up the system. e.g. you ask for wifi wakeups, suspend, but then incoming wifi frames don't wake up the system as they should. Signed-off-by: Paul Fox Signed-off-by: Daniel Drake Acked-by: Andres Salomon Cc: Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c index 8b9940e..7cce722 100644 --- a/arch/x86/platform/olpc/olpc.c +++ b/arch/x86/platform/olpc/olpc.c @@ -161,13 +161,13 @@ restart: if (inbuf && inlen) { /* write data to EC */ for (i = 0; i < inlen; i++) { + pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]); + outb(inbuf[i], 0x68); if (wait_on_ibf(0x6c, 0)) { printk(KERN_ERR "olpc-ec: timeout waiting for" " EC accept data!\n"); goto err; } - pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]); - outb(inbuf[i], 0x68); } } if (outbuf && outlen) { -- cgit v0.10.2 From 05e33fc20ea5e493a2a1e7f1d04f43cdf89f83ed Mon Sep 17 00:00:00 2001 From: Jack Steiner Date: Fri, 5 Aug 2011 09:09:00 -0500 Subject: x86, UV: Remove UV delay in starting slave cpus Delete the 10 msec delay between the INIT and SIPI when starting slave cpus. I can find no requirement for this delay. BIOS also has similar code sequences without the delay. Removing the delay reduces boot time by 40 sec. Every bit helps. Signed-off-by: Jack Steiner Cc: Link: http://lkml.kernel.org/r/20110805140900.GA6774@sgi.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index adc66c3..34b1859 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -207,7 +207,6 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | APIC_DM_INIT; uv_write_global_mmr64(pnode, UVH_IPI_INT, val); - mdelay(10); val = (1UL << UVH_IPI_INT_SEND_SHFT) | (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | -- cgit v0.10.2 From b77f0f3c1f587791aa5d9bd1b0012c9a89eb9258 Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Fri, 5 Aug 2011 16:40:40 -0400 Subject: jump label: Reduce the cycle count by changing the link order In the course of testing jump labels for use with the CFS bandwidth controller, Paul Turner, discovered that using jump labels reduced the branch count and the instruction count, but did not reduce the cycle count or wall time. 
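As background for the numbers that follow: a jump label lets a rarely-toggled branch compile down to a patchable no-op, so the disabled case costs no conditional test at all, and flipping the key live-patches the no-op into a jump. A minimal sketch of the idea, written against the current static-key spelling of the API rather than the jump_label_key/static_branch() interface in use at the time of this commit; sample_enabled and do_expensive_accounting are illustrative names, not kernel symbols.

#include <linux/jump_label.h>
#include <linux/types.h>

static void do_expensive_accounting(void)
{
	/* ... slow-path work that is normally compiled around ... */
}

DEFINE_STATIC_KEY_FALSE(sample_enabled);	/* branch defaults to off */

void record_sample(void)
{
	/*
	 * With the key disabled this is straight-line code, not a
	 * load-compare-branch; enabling the key patches the text at
	 * run time.
	 */
	if (static_branch_unlikely(&sample_enabled))
		do_expensive_accounting();
}

/* Toggled from slow-path configuration code, never from hot paths. */
void set_sampling(bool on)
{
	if (on)
		static_branch_enable(&sample_enabled);
	else
		static_branch_disable(&sample_enabled);
}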
I noticed that having the jump_label.o included in the kernel but not used in any way still caused this increase in cycle count and wall time. Thus, I moved jump_label.o in the kernel/Makefile, thus changing the link order, and presumably moving it out of hot icache areas. This brought down the cycle count/time as expected. In addition to Paul's testing, I've tested the patch using a single 'static_branch()' in the getppid() path, and basically running tight loops of calls to getppid(). Here are my results for the branch disabled case: With jump labels turned on (CONFIG_JUMP_LABEL), branch disabled: Performance counter stats for 'bash -c /tmp/getppid;true' (50 runs): 3,969,510,217 instructions # 0.864 IPC ( +-0.000% ) 4,592,334,954 cycles ( +- 0.046% ) 751,634,470 branches ( +- 0.000% ) 1.722635797 seconds time elapsed ( +- 0.046% ) Jump labels turned off (CONFIG_JUMP_LABEL not set), branch disabled: Performance counter stats for 'bash -c /tmp/getppid;true' (50 runs): 4,009,611,846 instructions # 0.867 IPC ( +-0.000% ) 4,622,210,580 cycles ( +- 0.012% ) 771,662,904 branches ( +- 0.000% ) 1.734341454 seconds time elapsed ( +- 0.022% ) Signed-off-by: Jason Baron Cc: rth@redhat.com Cc: a.p.zijlstra@chello.nl Cc: rostedt@goodmis.org Link: http://lkml.kernel.org/r/20110805204040.GG2522@redhat.com Signed-off-by: Ingo Molnar Tested-by: Paul Turner diff --git a/kernel/Makefile b/kernel/Makefile index d06467f..eca595e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \ - async.o range.o jump_label.o + async.o range.o obj-y += groups.o ifdef CONFIG_FUNCTION_TRACER @@ -107,6 +107,7 @@ obj-$(CONFIG_PERF_EVENTS) += events/ obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o obj-$(CONFIG_PADATA) += padata.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is -- cgit v0.10.2 From dac8f847c48adeeece5aba57600d08d2499d39b2 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sat, 6 Aug 2011 00:23:18 +0200 Subject: ALSA: snd-usb: Fix uninitialized variable usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Purely cosmetic, but fixes the following build warning. 
CC [M] sound/usb/quirks.o sound/usb/quirks.c: In function ‘snd_usb_apply_boot_quirk’: sound/usb/quirks.c:429:6: warning: ‘err’ may be used uninitialized in this function [-Wuninitialized] Signed-off-by: Daniel Mack Signed-off-by: Takashi Iwai diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 77762c9..81e07d8 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -426,7 +426,7 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev) */ static int snd_usb_cm6206_boot_quirk(struct usb_device *dev) { - int err, reg; + int err = 0, reg; int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000}; for (reg = 0; reg < ARRAY_SIZE(val); reg++) { -- cgit v0.10.2 From f4389489b5cbe60b3441869c68bb4afe760969c4 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sat, 6 Aug 2011 09:13:08 +0200 Subject: ALSA: snd-usb-caiaq: Fix keymap for RigKontrol3 Signed-off-by: Daniel Mack Reported-by: Renato Cc: stable@kernel.org Signed-off-by: Takashi Iwai diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c index 4432ef7..a213813 100644 --- a/sound/usb/caiaq/input.c +++ b/sound/usb/caiaq/input.c @@ -30,7 +30,7 @@ static unsigned short keycode_ak1[] = { KEY_C, KEY_B, KEY_A }; static unsigned short keycode_rk2[] = { KEY_1, KEY_2, KEY_3, KEY_4, KEY_5, KEY_6, KEY_7 }; static unsigned short keycode_rk3[] = { KEY_1, KEY_2, KEY_3, KEY_4, - KEY_5, KEY_6, KEY_7, KEY_5, KEY_6 }; + KEY_5, KEY_6, KEY_7, KEY_8, KEY_9 }; static unsigned short keycode_kore[] = { KEY_FN_F1, /* "menu" */ -- cgit v0.10.2 From 67ada8367c323ce13d0268c87cf09bf8af956e92 Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Sat, 6 Aug 2011 13:26:20 +0200 Subject: ALSA: asihpi - use kzalloc() Use kzalloc rather than kmalloc followed by memset with 0 This considers some simple cases that are common and easy to validate Note in particular that there are no ...s in the rule, so all of the matched code has to be contiguous The semantic patch that makes this output is available in scripts/coccinelle/api/alloc/kzalloc-simple.cocci. More information about semantic patching is available at http://coccinelle.lip6.fr/ Signed-off-by: Thomas Meyer Signed-off-by: Takashi Iwai diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c index 65b7ca1..bd47521 100644 --- a/sound/pci/asihpi/hpicmn.c +++ b/sound/pci/asihpi/hpicmn.c @@ -631,13 +631,12 @@ struct hpi_control_cache *hpi_alloc_control_cache(const u32 control_count, if (!p_cache) return NULL; - p_cache->p_info = - kmalloc(sizeof(*p_cache->p_info) * control_count, GFP_KERNEL); + p_cache->p_info = kzalloc(sizeof(*p_cache->p_info) * control_count, + GFP_KERNEL); if (!p_cache->p_info) { kfree(p_cache); return NULL; } - memset(p_cache->p_info, 0, sizeof(*p_cache->p_info) * control_count); p_cache->cache_size_in_bytes = size_in_bytes; p_cache->control_count = control_count; p_cache->p_cache = p_dsp_control_buffer; -- cgit v0.10.2 From df944f66784e6d4f2f50739263a4947885d8b6ae Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 7 Aug 2011 17:34:07 +0200 Subject: ALSA: Fix dependency of CONFIG_SND_TEA575X CONFIG_SND_TEA575X is enabled by RADIO_SF16FMR2, but the latter one is no PCI device. Since tea575x-tuner itself is independent from the board bus type, the config should be moved out of SND_PCI dependency. 
Reported-by: Randy Dunlap Acked-by: Randy Dunlap Signed-off-by: Takashi Iwai diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index 50abf5b..8816804 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig @@ -1,5 +1,10 @@ # ALSA PCI drivers +config SND_TEA575X + tristate + depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 + default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 + menuconfig SND_PCI bool "PCI sound devices" depends on PCI @@ -563,11 +568,6 @@ config SND_FM801_TEA575X_BOOL FM801 chip with a TEA5757 tuner (MediaForte SF256-PCS, SF256-PCP and SF64-PCR) into the snd-fm801 driver. -config SND_TEA575X - tristate - depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 - default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 - source "sound/pci/hda/Kconfig" config SND_HDSP -- cgit v0.10.2 From 4d81897139ffb738ee14b6f84f63f93ecda1136b Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sat, 6 Aug 2011 23:38:30 +0900 Subject: TOMOYO: Fix incomplete read of /sys/kernel/security/tomoyo/profile Commit bd03a3e4 "TOMOYO: Add policy namespace support." forgot to set EOF flag and forgot to print namespace at PREFERENCE line. Signed-off-by: Tetsuo Handa Signed-off-by: James Morris diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index c8439cf2..2e43aec 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -710,8 +710,10 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) head->r.index++) if (ns->profile_ptr[head->r.index]) break; - if (head->r.index == TOMOYO_MAX_PROFILES) + if (head->r.index == TOMOYO_MAX_PROFILES) { + head->r.eof = true; return; + } head->r.step++; break; case 2: @@ -723,6 +725,7 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) tomoyo_io_printf(head, "%u-COMMENT=", index); tomoyo_set_string(head, comment ? comment->name : ""); tomoyo_set_lf(head); + tomoyo_print_namespace(head); tomoyo_io_printf(head, "%u-PREFERENCE={ ", index); for (i = 0; i < TOMOYO_MAX_PREF; i++) tomoyo_io_printf(head, "%s=%u ", -- cgit v0.10.2 From fad54440438a7c231a6ae347738423cbabc936d9 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Fri, 5 Aug 2011 00:36:28 +0000 Subject: netfilter: avoid double free in nf_reinject NF_STOLEN means skb was already freed Signed-off-by: Julian Anastasov Signed-off-by: David S. Miller diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5b466cd..84d0fd4 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -312,6 +312,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) } break; case NF_STOLEN: + break; default: kfree_skb(skb); } -- cgit v0.10.2 From 614851601c121b1320a35757ab88292d6272f906 Mon Sep 17 00:00:00 2001 From: Matvejchikov Ilya Date: Fri, 5 Aug 2011 09:23:51 +0000 Subject: slip: fix NOHZ local_softirq_pending 08 warning When using nanosleep() in an userspace application we get a ratelimit warning: NOHZ: local_softirq_pending 08 According to 481a8199142c050b72bff8a1956a49fd0a75bbe0 the problem is caused by netif_rx() function. This patch replaces netif_rx() with netif_rx_ni() which has to be used from process/softirq context. Signed-off-by: Matvejchikov Ilya Signed-off-by: David S. 
Miller diff --git a/drivers/net/slip.c b/drivers/net/slip.c index f11b3f3..4c61753 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -367,7 +367,7 @@ static void sl_bump(struct slip *sl) memcpy(skb_put(skb, count), sl->rbuff, count); skb_reset_mac_header(skb); skb->protocol = htons(ETH_P_IP); - netif_rx(skb); + netif_rx_ni(skb); dev->stats.rx_packets++; } -- cgit v0.10.2 From 025890b4ed433b9c9e0f221bb806d42d049c87fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20de=20Peslo=C3=BCan?= Date: Sat, 6 Aug 2011 07:06:39 +0000 Subject: bonding: document two undocumented options. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 655f8919d549ad1872e24d826b6ce42530516d2e bonding: add min links parameter to 802.3ad and commit ebd8e4977a87cb81d93c62a9bff0102a9713722f bonding: add all_slaves_active parameter introduced new options to bonding, but didn't provide the documentation for those options. V2: add the default value for both options. V3: document the exact behavior of min_links default value. Signed-off-by: Nicolas de Pesloüan Signed-off-by: David S. Miller diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 5dd960d..91df678 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -238,6 +238,18 @@ ad_select This option was added in bonding version 3.4.0. +all_slaves_active + + Specifies that duplicate frames (received on inactive ports) should be + dropped (0) or delivered (1). + + Normally, bonding will drop duplicate frames (received on inactive + ports), which is desirable for most users. But there are some times + it is nice to allow duplicate frames to be delivered. + + The default value is 0 (drop duplicate frames received on inactive + ports). + arp_interval Specifies the ARP link monitoring frequency in milliseconds. @@ -433,6 +445,23 @@ miimon determined. See the High Availability section for additional information. The default value is 0. +min_links + + Specifies the minimum number of links that must be active before + asserting carrier. It is similar to the Cisco EtherChannel min-links + feature. This allows setting the minimum number of member ports that + must be up (link-up state) before marking the bond device as up + (carrier on). This is useful for situations where higher level services + such as clustering want to ensure a minimum number of low bandwidth + links are active before switchover. This option only affect 802.3ad + mode. + + The default value is 0. This will cause carrier to be asserted (for + 802.3ad mode) whenever there is an active aggregator, regardless of the + number of available links in that aggregator. Note that, because an + aggregator cannot be active without at least one available link, + setting this option to 0 or to 1 has the exact same effect. + mode Specifies one of the bonding policies. The default is -- cgit v0.10.2 From d547f727df86059104af2234804fdd538e112015 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 7 Aug 2011 22:20:20 -0700 Subject: ipv4: fix the reusing of routing cache entries compare_keys and ip_route_input_common rely on rt_oif for distinguishing of input and output routes with same keys values. But sometimes the input route has also same hash chain (keyed by iif != 0) with the output routes (keyed by orig_oif=0). Problem visible if running with small number of rhash_entries. Fix them to use rt_route_iif instead. 
By this way input route can not be returned to users that request output route. The patch fixes the ip_rt_bug errors that were reported in ip_local_out context, mostly for 255.255.255.255 destinations. Signed-off-by: Julian Anastasov Signed-off-by: David S. Miller diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e3dec1c..cb7efe0 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -731,6 +731,7 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | (rt1->rt_mark ^ rt2->rt_mark) | (rt1->rt_key_tos ^ rt2->rt_key_tos) | + (rt1->rt_route_iif ^ rt2->rt_route_iif) | (rt1->rt_oif ^ rt2->rt_oif) | (rt1->rt_iif ^ rt2->rt_iif)) == 0; } @@ -2321,8 +2322,8 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) | ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | (rth->rt_iif ^ iif) | - rth->rt_oif | (rth->rt_key_tos ^ tos)) == 0 && + rt_is_input_route(rth) && rth->rt_mark == skb->mark && net_eq(dev_net(rth->dst.dev), net) && !rt_is_expired(rth)) { -- cgit v0.10.2 From 8bab6f14084460d722f253221efa4148d3fc8b16 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 6 Aug 2011 12:12:04 +0000 Subject: compat_ioctl: add compat handler for PPPIOCGL2TPSTATS fixes following error seen on x86_64 kernel: ioctl32(openl2tpd:7480): Unknown cmd fd(14) cmd(80487436){t:'t';sz:72} arg(ffa7e6c0) on socket:[105094] The argument (struct pppol2tp_ioc_stats) uses "aligned_u64" and thus doesn't need fixups. Cc: James Chapman Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Florian Westphal Signed-off-by: David S. Miller diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 8be086e..51352de 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -1003,6 +1003,7 @@ COMPATIBLE_IOCTL(PPPIOCCONNECT) COMPATIBLE_IOCTL(PPPIOCDISCONN) COMPATIBLE_IOCTL(PPPIOCATTCHAN) COMPATIBLE_IOCTL(PPPIOCGCHAN) +COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS) /* PPPOX */ COMPATIBLE_IOCTL(PPPOEIOCSFWD) COMPATIBLE_IOCTL(PPPOEIOCDFWD) -- cgit v0.10.2 From a9ba615134ad32254fae84f16e1751854755135c Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 24 Jun 2011 12:10:44 +0100 Subject: ASoC: Rename WM8915 to WM8996 For marketing reasons the part will be called WM8996. In order to avoid user confusion rename the driver to reflect this. 
Signed-off-by: Mark Brown Acked-by: Kukjin Kim Acked-by: Liam Girdwood diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c index 9026249..af0c2fe 100644 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c @@ -65,7 +65,7 @@ #include #include -#include +#include #include #include @@ -614,7 +614,7 @@ static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = { .disable_touch = true, }; -static struct wm8915_retune_mobile_config wm8915_retune[] = { +static struct wm8996_retune_mobile_config wm8996_retune[] = { { .name = "Sub LPF", .rate = 48000, @@ -635,12 +635,12 @@ static struct wm8915_retune_mobile_config wm8915_retune[] = { }, }; -static struct wm8915_pdata wm8915_pdata __initdata = { +static struct wm8996_pdata wm8996_pdata __initdata = { .ldo_ena = S3C64XX_GPN(7), .gpio_base = CODEC_GPIO_BASE, .micdet_def = 1, - .inl_mode = WM8915_DIFFERRENTIAL_1, - .inr_mode = WM8915_DIFFERRENTIAL_1, + .inl_mode = WM8996_DIFFERRENTIAL_1, + .inr_mode = WM8996_DIFFERRENTIAL_1, .irq_flags = IRQF_TRIGGER_RISING, @@ -652,8 +652,8 @@ static struct wm8915_pdata wm8915_pdata __initdata = { 0x020e, /* GPIO5 == CLKOUT */ }, - .retune_mobile_cfgs = wm8915_retune, - .num_retune_mobile_cfgs = ARRAY_SIZE(wm8915_retune), + .retune_mobile_cfgs = wm8996_retune, + .num_retune_mobile_cfgs = ARRAY_SIZE(wm8996_retune), }; static struct wm8962_pdata wm8962_pdata __initdata = { @@ -679,8 +679,8 @@ static struct i2c_board_info i2c_devs1[] __initdata = { .platform_data = &glenfarclas_pmic_pdata }, { I2C_BOARD_INFO("wm1250-ev1", 0x27) }, - { I2C_BOARD_INFO("wm8915", 0x1a), - .platform_data = &wm8915_pdata, + { I2C_BOARD_INFO("wm8996", 0x1a), + .platform_data = &wm8996_pdata, .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2, }, { I2C_BOARD_INFO("wm9081", 0x6c), diff --git a/include/sound/wm8915.h b/include/sound/wm8915.h deleted file mode 100644 index 5817d76..0000000 --- a/include/sound/wm8915.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * linux/sound/wm8915.h -- Platform data for WM8915 - * - * Copyright 2011 Wolfson Microelectronics. PLC. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __LINUX_SND_WM8903_H -#define __LINUX_SND_WM8903_H - -enum wm8915_inmode { - WM8915_DIFFERRENTIAL_1 = 0, /* IN1xP - IN1xN */ - WM8915_INVERTING = 1, /* IN1xN */ - WM8915_NON_INVERTING = 2, /* IN1xP */ - WM8915_DIFFERENTIAL_2 = 3, /* IN2xP - IN2xP */ -}; - -/** - * ReTune Mobile configurations are specified with a label, sample - * rate and set of values to write (the enable bits will be ignored). 
- * - * Configurations are expected to be generated using the ReTune Mobile - * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/ - */ -struct wm8915_retune_mobile_config { - const char *name; - int rate; - u16 regs[20]; -}; - -#define WM8915_SET_DEFAULT 0x10000 - -struct wm8915_pdata { - int irq_flags; /** Set IRQ trigger flags; default active low */ - - int ldo_ena; /** GPIO for LDO1; -1 for none */ - - int micdet_def; /** Default MICDET_SRC/HP1FB_SRC/MICD_BIAS */ - - enum wm8915_inmode inl_mode; - enum wm8915_inmode inr_mode; - - u32 spkmute_seq; /** Value for register 0x802 */ - - int gpio_base; - u32 gpio_default[5]; - - int num_retune_mobile_cfgs; - struct wm8915_retune_mobile_config *retune_mobile_cfgs; -}; - -#endif diff --git a/include/sound/wm8996.h b/include/sound/wm8996.h new file mode 100644 index 0000000..ea4d88f --- /dev/null +++ b/include/sound/wm8996.h @@ -0,0 +1,55 @@ +/* + * linux/sound/wm8996.h -- Platform data for WM8996 + * + * Copyright 2011 Wolfson Microelectronics. PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_SND_WM8996_H +#define __LINUX_SND_WM8996_H + +enum wm8996_inmode { + WM8996_DIFFERRENTIAL_1 = 0, /* IN1xP - IN1xN */ + WM8996_INVERTING = 1, /* IN1xN */ + WM8996_NON_INVERTING = 2, /* IN1xP */ + WM8996_DIFFERENTIAL_2 = 3, /* IN2xP - IN2xP */ +}; + +/** + * ReTune Mobile configurations are specified with a label, sample + * rate and set of values to write (the enable bits will be ignored). + * + * Configurations are expected to be generated using the ReTune Mobile + * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/ + */ +struct wm8996_retune_mobile_config { + const char *name; + int rate; + u16 regs[20]; +}; + +#define WM8996_SET_DEFAULT 0x10000 + +struct wm8996_pdata { + int irq_flags; /** Set IRQ trigger flags; default active low */ + + int ldo_ena; /** GPIO for LDO1; -1 for none */ + + int micdet_def; /** Default MICDET_SRC/HP1FB_SRC/MICD_BIAS */ + + enum wm8996_inmode inl_mode; + enum wm8996_inmode inr_mode; + + u32 spkmute_seq; /** Value for register 0x802 */ + + int gpio_base; + u32 gpio_default[5]; + + int num_retune_mobile_cfgs; + struct wm8996_retune_mobile_config *retune_mobile_cfgs; +}; + +#endif diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 379b2e3..665d924 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -78,7 +78,6 @@ config SND_SOC_ALL_CODECS select SND_SOC_WM8900 if I2C select SND_SOC_WM8903 if I2C select SND_SOC_WM8904 if I2C - select SND_SOC_WM8915 if I2C select SND_SOC_WM8940 if I2C select SND_SOC_WM8955 if I2C select SND_SOC_WM8960 if I2C @@ -95,6 +94,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_WM8993 if I2C select SND_SOC_WM8994 if MFD_WM8994 select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI + select SND_SOC_WM8996 if I2C select SND_SOC_WM9081 if I2C select SND_SOC_WM9090 if I2C select SND_SOC_WM9705 if SND_SOC_AC97_BUS @@ -329,9 +329,6 @@ config SND_SOC_WM8903 config SND_SOC_WM8904 tristate -config SND_SOC_WM8915 - tristate - config SND_SOC_WM8940 tristate @@ -380,6 +377,9 @@ config SND_SOC_WM8994 config SND_SOC_WM8995 tristate +config SND_SOC_WM8996 + tristate + config SND_SOC_WM9081 tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index da9990f..5119a7e 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -63,7 +63,7 @@ snd-soc-wm8804-objs := 
wm8804.o snd-soc-wm8900-objs := wm8900.o snd-soc-wm8903-objs := wm8903.o snd-soc-wm8904-objs := wm8904.o -snd-soc-wm8915-objs := wm8915.o +snd-soc-wm8996-objs := wm8996.o snd-soc-wm8940-objs := wm8940.o snd-soc-wm8955-objs := wm8955.o snd-soc-wm8960-objs := wm8960.o @@ -160,7 +160,7 @@ obj-$(CONFIG_SND_SOC_WM8804) += snd-soc-wm8804.o obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o obj-$(CONFIG_SND_SOC_WM8904) += snd-soc-wm8904.o -obj-$(CONFIG_SND_SOC_WM8915) += snd-soc-wm8915.o +obj-$(CONFIG_SND_SOC_WM8996) += snd-soc-wm8996.o obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o obj-$(CONFIG_SND_SOC_WM8955) += snd-soc-wm8955.o obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c deleted file mode 100644 index 423baa9..0000000 --- a/sound/soc/codecs/wm8915.c +++ /dev/null @@ -1,2995 +0,0 @@ -/* - * wm8915.c - WM8915 audio codec interface - * - * Copyright 2011 Wolfson Microelectronics PLC. - * Author: Mark Brown - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "wm8915.h" - -#define WM8915_AIFS 2 - -#define HPOUT1L 1 -#define HPOUT1R 2 -#define HPOUT2L 4 -#define HPOUT2R 8 - -#define WM8915_NUM_SUPPLIES 4 -static const char *wm8915_supply_names[WM8915_NUM_SUPPLIES] = { - "DBVDD", - "AVDD1", - "AVDD2", - "CPVDD", -}; - -struct wm8915_priv { - struct snd_soc_codec *codec; - - int ldo1ena; - - int sysclk; - int sysclk_src; - - int fll_src; - int fll_fref; - int fll_fout; - - struct completion fll_lock; - - u16 dcs_pending; - struct completion dcs_done; - - u16 hpout_ena; - u16 hpout_pending; - - struct regulator_bulk_data supplies[WM8915_NUM_SUPPLIES]; - struct notifier_block disable_nb[WM8915_NUM_SUPPLIES]; - - struct wm8915_pdata pdata; - - int rx_rate[WM8915_AIFS]; - int bclk_rate[WM8915_AIFS]; - - /* Platform dependant ReTune mobile configuration */ - int num_retune_mobile_texts; - const char **retune_mobile_texts; - int retune_mobile_cfg[2]; - struct soc_enum retune_mobile_enum; - - struct snd_soc_jack *jack; - bool detecting; - bool jack_mic; - wm8915_polarity_fn polarity_cb; - -#ifdef CONFIG_GPIOLIB - struct gpio_chip gpio_chip; -#endif -}; - -/* We can't use the same notifier block for more than one supply and - * there's no way I can see to get from a callback to the caller - * except container_of(). 
- */ -#define WM8915_REGULATOR_EVENT(n) \ -static int wm8915_regulator_event_##n(struct notifier_block *nb, \ - unsigned long event, void *data) \ -{ \ - struct wm8915_priv *wm8915 = container_of(nb, struct wm8915_priv, \ - disable_nb[n]); \ - if (event & REGULATOR_EVENT_DISABLE) { \ - wm8915->codec->cache_sync = 1; \ - } \ - return 0; \ -} - -WM8915_REGULATOR_EVENT(0) -WM8915_REGULATOR_EVENT(1) -WM8915_REGULATOR_EVENT(2) -WM8915_REGULATOR_EVENT(3) - -static const u16 wm8915_reg[WM8915_MAX_REGISTER] = { - [WM8915_SOFTWARE_RESET] = 0x8915, - [WM8915_POWER_MANAGEMENT_7] = 0x10, - [WM8915_DAC1_HPOUT1_VOLUME] = 0x88, - [WM8915_DAC2_HPOUT2_VOLUME] = 0x88, - [WM8915_DAC1_LEFT_VOLUME] = 0x2c0, - [WM8915_DAC1_RIGHT_VOLUME] = 0x2c0, - [WM8915_DAC2_LEFT_VOLUME] = 0x2c0, - [WM8915_DAC2_RIGHT_VOLUME] = 0x2c0, - [WM8915_OUTPUT1_LEFT_VOLUME] = 0x80, - [WM8915_OUTPUT1_RIGHT_VOLUME] = 0x80, - [WM8915_OUTPUT2_LEFT_VOLUME] = 0x80, - [WM8915_OUTPUT2_RIGHT_VOLUME] = 0x80, - [WM8915_MICBIAS_1] = 0x39, - [WM8915_MICBIAS_2] = 0x39, - [WM8915_LDO_1] = 0x3, - [WM8915_LDO_2] = 0x13, - [WM8915_ACCESSORY_DETECT_MODE_1] = 0x4, - [WM8915_HEADPHONE_DETECT_1] = 0x20, - [WM8915_MIC_DETECT_1] = 0x7600, - [WM8915_MIC_DETECT_2] = 0xbf, - [WM8915_CHARGE_PUMP_1] = 0x1f25, - [WM8915_CHARGE_PUMP_2] = 0xab19, - [WM8915_DC_SERVO_5] = 0x2a2a, - [WM8915_CONTROL_INTERFACE_1] = 0x8004, - [WM8915_CLOCKING_1] = 0x10, - [WM8915_AIF_RATE] = 0x83, - [WM8915_FLL_CONTROL_4] = 0x5dc0, - [WM8915_FLL_CONTROL_5] = 0xc84, - [WM8915_FLL_EFS_2] = 0x2, - [WM8915_AIF1_TX_LRCLK_1] = 0x80, - [WM8915_AIF1_TX_LRCLK_2] = 0x8, - [WM8915_AIF1_RX_LRCLK_1] = 0x80, - [WM8915_AIF1TX_DATA_CONFIGURATION_1] = 0x1818, - [WM8915_AIF1RX_DATA_CONFIGURATION] = 0x1818, - [WM8915_AIF1TX_TEST] = 0x7, - [WM8915_AIF2_TX_LRCLK_1] = 0x80, - [WM8915_AIF2_TX_LRCLK_2] = 0x8, - [WM8915_AIF2_RX_LRCLK_1] = 0x80, - [WM8915_AIF2TX_DATA_CONFIGURATION_1] = 0x1818, - [WM8915_AIF2RX_DATA_CONFIGURATION] = 0x1818, - [WM8915_AIF2TX_TEST] = 0x1, - [WM8915_DSP1_TX_LEFT_VOLUME] = 0xc0, - [WM8915_DSP1_TX_RIGHT_VOLUME] = 0xc0, - [WM8915_DSP1_RX_LEFT_VOLUME] = 0xc0, - [WM8915_DSP1_RX_RIGHT_VOLUME] = 0xc0, - [WM8915_DSP1_TX_FILTERS] = 0x2000, - [WM8915_DSP1_RX_FILTERS_1] = 0x200, - [WM8915_DSP1_RX_FILTERS_2] = 0x10, - [WM8915_DSP1_DRC_1] = 0x98, - [WM8915_DSP1_DRC_2] = 0x845, - [WM8915_DSP1_RX_EQ_GAINS_1] = 0x6318, - [WM8915_DSP1_RX_EQ_GAINS_2] = 0x6300, - [WM8915_DSP1_RX_EQ_BAND_1_A] = 0xfca, - [WM8915_DSP1_RX_EQ_BAND_1_B] = 0x400, - [WM8915_DSP1_RX_EQ_BAND_1_PG] = 0xd8, - [WM8915_DSP1_RX_EQ_BAND_2_A] = 0x1eb5, - [WM8915_DSP1_RX_EQ_BAND_2_B] = 0xf145, - [WM8915_DSP1_RX_EQ_BAND_2_C] = 0xb75, - [WM8915_DSP1_RX_EQ_BAND_2_PG] = 0x1c5, - [WM8915_DSP1_RX_EQ_BAND_3_A] = 0x1c58, - [WM8915_DSP1_RX_EQ_BAND_3_B] = 0xf373, - [WM8915_DSP1_RX_EQ_BAND_3_C] = 0xa54, - [WM8915_DSP1_RX_EQ_BAND_3_PG] = 0x558, - [WM8915_DSP1_RX_EQ_BAND_4_A] = 0x168e, - [WM8915_DSP1_RX_EQ_BAND_4_B] = 0xf829, - [WM8915_DSP1_RX_EQ_BAND_4_C] = 0x7ad, - [WM8915_DSP1_RX_EQ_BAND_4_PG] = 0x1103, - [WM8915_DSP1_RX_EQ_BAND_5_A] = 0x564, - [WM8915_DSP1_RX_EQ_BAND_5_B] = 0x559, - [WM8915_DSP1_RX_EQ_BAND_5_PG] = 0x4000, - [WM8915_DSP2_TX_LEFT_VOLUME] = 0xc0, - [WM8915_DSP2_TX_RIGHT_VOLUME] = 0xc0, - [WM8915_DSP2_RX_LEFT_VOLUME] = 0xc0, - [WM8915_DSP2_RX_RIGHT_VOLUME] = 0xc0, - [WM8915_DSP2_TX_FILTERS] = 0x2000, - [WM8915_DSP2_RX_FILTERS_1] = 0x200, - [WM8915_DSP2_RX_FILTERS_2] = 0x10, - [WM8915_DSP2_DRC_1] = 0x98, - [WM8915_DSP2_DRC_2] = 0x845, - [WM8915_DSP2_RX_EQ_GAINS_1] = 0x6318, - [WM8915_DSP2_RX_EQ_GAINS_2] = 0x6300, - 
[WM8915_DSP2_RX_EQ_BAND_1_A] = 0xfca, - [WM8915_DSP2_RX_EQ_BAND_1_B] = 0x400, - [WM8915_DSP2_RX_EQ_BAND_1_PG] = 0xd8, - [WM8915_DSP2_RX_EQ_BAND_2_A] = 0x1eb5, - [WM8915_DSP2_RX_EQ_BAND_2_B] = 0xf145, - [WM8915_DSP2_RX_EQ_BAND_2_C] = 0xb75, - [WM8915_DSP2_RX_EQ_BAND_2_PG] = 0x1c5, - [WM8915_DSP2_RX_EQ_BAND_3_A] = 0x1c58, - [WM8915_DSP2_RX_EQ_BAND_3_B] = 0xf373, - [WM8915_DSP2_RX_EQ_BAND_3_C] = 0xa54, - [WM8915_DSP2_RX_EQ_BAND_3_PG] = 0x558, - [WM8915_DSP2_RX_EQ_BAND_4_A] = 0x168e, - [WM8915_DSP2_RX_EQ_BAND_4_B] = 0xf829, - [WM8915_DSP2_RX_EQ_BAND_4_C] = 0x7ad, - [WM8915_DSP2_RX_EQ_BAND_4_PG] = 0x1103, - [WM8915_DSP2_RX_EQ_BAND_5_A] = 0x564, - [WM8915_DSP2_RX_EQ_BAND_5_B] = 0x559, - [WM8915_DSP2_RX_EQ_BAND_5_PG] = 0x4000, - [WM8915_OVERSAMPLING] = 0xd, - [WM8915_SIDETONE] = 0x1040, - [WM8915_GPIO_1] = 0xa101, - [WM8915_GPIO_2] = 0xa101, - [WM8915_GPIO_3] = 0xa101, - [WM8915_GPIO_4] = 0xa101, - [WM8915_GPIO_5] = 0xa101, - [WM8915_PULL_CONTROL_2] = 0x140, - [WM8915_INTERRUPT_STATUS_1_MASK] = 0x1f, - [WM8915_INTERRUPT_STATUS_2_MASK] = 0x1ecf, - [WM8915_RIGHT_PDM_SPEAKER] = 0x1, - [WM8915_PDM_SPEAKER_MUTE_SEQUENCE] = 0x69, - [WM8915_PDM_SPEAKER_VOLUME] = 0x66, - [WM8915_WRITE_SEQUENCER_0] = 0x1, - [WM8915_WRITE_SEQUENCER_1] = 0x1, - [WM8915_WRITE_SEQUENCER_3] = 0x6, - [WM8915_WRITE_SEQUENCER_4] = 0x40, - [WM8915_WRITE_SEQUENCER_5] = 0x1, - [WM8915_WRITE_SEQUENCER_6] = 0xf, - [WM8915_WRITE_SEQUENCER_7] = 0x6, - [WM8915_WRITE_SEQUENCER_8] = 0x1, - [WM8915_WRITE_SEQUENCER_9] = 0x3, - [WM8915_WRITE_SEQUENCER_10] = 0x104, - [WM8915_WRITE_SEQUENCER_12] = 0x60, - [WM8915_WRITE_SEQUENCER_13] = 0x11, - [WM8915_WRITE_SEQUENCER_14] = 0x401, - [WM8915_WRITE_SEQUENCER_16] = 0x50, - [WM8915_WRITE_SEQUENCER_17] = 0x3, - [WM8915_WRITE_SEQUENCER_18] = 0x100, - [WM8915_WRITE_SEQUENCER_20] = 0x51, - [WM8915_WRITE_SEQUENCER_21] = 0x3, - [WM8915_WRITE_SEQUENCER_22] = 0x104, - [WM8915_WRITE_SEQUENCER_23] = 0xa, - [WM8915_WRITE_SEQUENCER_24] = 0x60, - [WM8915_WRITE_SEQUENCER_25] = 0x3b, - [WM8915_WRITE_SEQUENCER_26] = 0x502, - [WM8915_WRITE_SEQUENCER_27] = 0x100, - [WM8915_WRITE_SEQUENCER_28] = 0x2fff, - [WM8915_WRITE_SEQUENCER_32] = 0x2fff, - [WM8915_WRITE_SEQUENCER_36] = 0x2fff, - [WM8915_WRITE_SEQUENCER_40] = 0x2fff, - [WM8915_WRITE_SEQUENCER_44] = 0x2fff, - [WM8915_WRITE_SEQUENCER_48] = 0x2fff, - [WM8915_WRITE_SEQUENCER_52] = 0x2fff, - [WM8915_WRITE_SEQUENCER_56] = 0x2fff, - [WM8915_WRITE_SEQUENCER_60] = 0x2fff, - [WM8915_WRITE_SEQUENCER_64] = 0x1, - [WM8915_WRITE_SEQUENCER_65] = 0x1, - [WM8915_WRITE_SEQUENCER_67] = 0x6, - [WM8915_WRITE_SEQUENCER_68] = 0x40, - [WM8915_WRITE_SEQUENCER_69] = 0x1, - [WM8915_WRITE_SEQUENCER_70] = 0xf, - [WM8915_WRITE_SEQUENCER_71] = 0x6, - [WM8915_WRITE_SEQUENCER_72] = 0x1, - [WM8915_WRITE_SEQUENCER_73] = 0x3, - [WM8915_WRITE_SEQUENCER_74] = 0x104, - [WM8915_WRITE_SEQUENCER_76] = 0x60, - [WM8915_WRITE_SEQUENCER_77] = 0x11, - [WM8915_WRITE_SEQUENCER_78] = 0x401, - [WM8915_WRITE_SEQUENCER_80] = 0x50, - [WM8915_WRITE_SEQUENCER_81] = 0x3, - [WM8915_WRITE_SEQUENCER_82] = 0x100, - [WM8915_WRITE_SEQUENCER_84] = 0x60, - [WM8915_WRITE_SEQUENCER_85] = 0x3b, - [WM8915_WRITE_SEQUENCER_86] = 0x502, - [WM8915_WRITE_SEQUENCER_87] = 0x100, - [WM8915_WRITE_SEQUENCER_88] = 0x2fff, - [WM8915_WRITE_SEQUENCER_92] = 0x2fff, - [WM8915_WRITE_SEQUENCER_96] = 0x2fff, - [WM8915_WRITE_SEQUENCER_100] = 0x2fff, - [WM8915_WRITE_SEQUENCER_104] = 0x2fff, - [WM8915_WRITE_SEQUENCER_108] = 0x2fff, - [WM8915_WRITE_SEQUENCER_112] = 0x2fff, - [WM8915_WRITE_SEQUENCER_116] = 0x2fff, - [WM8915_WRITE_SEQUENCER_120] = 0x2fff, - 
[WM8915_WRITE_SEQUENCER_124] = 0x2fff, - [WM8915_WRITE_SEQUENCER_128] = 0x1, - [WM8915_WRITE_SEQUENCER_129] = 0x1, - [WM8915_WRITE_SEQUENCER_131] = 0x6, - [WM8915_WRITE_SEQUENCER_132] = 0x40, - [WM8915_WRITE_SEQUENCER_133] = 0x1, - [WM8915_WRITE_SEQUENCER_134] = 0xf, - [WM8915_WRITE_SEQUENCER_135] = 0x6, - [WM8915_WRITE_SEQUENCER_136] = 0x1, - [WM8915_WRITE_SEQUENCER_137] = 0x3, - [WM8915_WRITE_SEQUENCER_138] = 0x106, - [WM8915_WRITE_SEQUENCER_140] = 0x61, - [WM8915_WRITE_SEQUENCER_141] = 0x11, - [WM8915_WRITE_SEQUENCER_142] = 0x401, - [WM8915_WRITE_SEQUENCER_144] = 0x50, - [WM8915_WRITE_SEQUENCER_145] = 0x3, - [WM8915_WRITE_SEQUENCER_146] = 0x102, - [WM8915_WRITE_SEQUENCER_148] = 0x51, - [WM8915_WRITE_SEQUENCER_149] = 0x3, - [WM8915_WRITE_SEQUENCER_150] = 0x106, - [WM8915_WRITE_SEQUENCER_151] = 0xa, - [WM8915_WRITE_SEQUENCER_152] = 0x61, - [WM8915_WRITE_SEQUENCER_153] = 0x3b, - [WM8915_WRITE_SEQUENCER_154] = 0x502, - [WM8915_WRITE_SEQUENCER_155] = 0x100, - [WM8915_WRITE_SEQUENCER_156] = 0x2fff, - [WM8915_WRITE_SEQUENCER_160] = 0x2fff, - [WM8915_WRITE_SEQUENCER_164] = 0x2fff, - [WM8915_WRITE_SEQUENCER_168] = 0x2fff, - [WM8915_WRITE_SEQUENCER_172] = 0x2fff, - [WM8915_WRITE_SEQUENCER_176] = 0x2fff, - [WM8915_WRITE_SEQUENCER_180] = 0x2fff, - [WM8915_WRITE_SEQUENCER_184] = 0x2fff, - [WM8915_WRITE_SEQUENCER_188] = 0x2fff, - [WM8915_WRITE_SEQUENCER_192] = 0x1, - [WM8915_WRITE_SEQUENCER_193] = 0x1, - [WM8915_WRITE_SEQUENCER_195] = 0x6, - [WM8915_WRITE_SEQUENCER_196] = 0x40, - [WM8915_WRITE_SEQUENCER_197] = 0x1, - [WM8915_WRITE_SEQUENCER_198] = 0xf, - [WM8915_WRITE_SEQUENCER_199] = 0x6, - [WM8915_WRITE_SEQUENCER_200] = 0x1, - [WM8915_WRITE_SEQUENCER_201] = 0x3, - [WM8915_WRITE_SEQUENCER_202] = 0x106, - [WM8915_WRITE_SEQUENCER_204] = 0x61, - [WM8915_WRITE_SEQUENCER_205] = 0x11, - [WM8915_WRITE_SEQUENCER_206] = 0x401, - [WM8915_WRITE_SEQUENCER_208] = 0x50, - [WM8915_WRITE_SEQUENCER_209] = 0x3, - [WM8915_WRITE_SEQUENCER_210] = 0x102, - [WM8915_WRITE_SEQUENCER_212] = 0x61, - [WM8915_WRITE_SEQUENCER_213] = 0x3b, - [WM8915_WRITE_SEQUENCER_214] = 0x502, - [WM8915_WRITE_SEQUENCER_215] = 0x100, - [WM8915_WRITE_SEQUENCER_216] = 0x2fff, - [WM8915_WRITE_SEQUENCER_220] = 0x2fff, - [WM8915_WRITE_SEQUENCER_224] = 0x2fff, - [WM8915_WRITE_SEQUENCER_228] = 0x2fff, - [WM8915_WRITE_SEQUENCER_232] = 0x2fff, - [WM8915_WRITE_SEQUENCER_236] = 0x2fff, - [WM8915_WRITE_SEQUENCER_240] = 0x2fff, - [WM8915_WRITE_SEQUENCER_244] = 0x2fff, - [WM8915_WRITE_SEQUENCER_248] = 0x2fff, - [WM8915_WRITE_SEQUENCER_252] = 0x2fff, - [WM8915_WRITE_SEQUENCER_256] = 0x60, - [WM8915_WRITE_SEQUENCER_258] = 0x601, - [WM8915_WRITE_SEQUENCER_260] = 0x50, - [WM8915_WRITE_SEQUENCER_262] = 0x100, - [WM8915_WRITE_SEQUENCER_264] = 0x1, - [WM8915_WRITE_SEQUENCER_266] = 0x104, - [WM8915_WRITE_SEQUENCER_267] = 0x100, - [WM8915_WRITE_SEQUENCER_268] = 0x2fff, - [WM8915_WRITE_SEQUENCER_272] = 0x2fff, - [WM8915_WRITE_SEQUENCER_276] = 0x2fff, - [WM8915_WRITE_SEQUENCER_280] = 0x2fff, - [WM8915_WRITE_SEQUENCER_284] = 0x2fff, - [WM8915_WRITE_SEQUENCER_288] = 0x2fff, - [WM8915_WRITE_SEQUENCER_292] = 0x2fff, - [WM8915_WRITE_SEQUENCER_296] = 0x2fff, - [WM8915_WRITE_SEQUENCER_300] = 0x2fff, - [WM8915_WRITE_SEQUENCER_304] = 0x2fff, - [WM8915_WRITE_SEQUENCER_308] = 0x2fff, - [WM8915_WRITE_SEQUENCER_312] = 0x2fff, - [WM8915_WRITE_SEQUENCER_316] = 0x2fff, - [WM8915_WRITE_SEQUENCER_320] = 0x61, - [WM8915_WRITE_SEQUENCER_322] = 0x601, - [WM8915_WRITE_SEQUENCER_324] = 0x50, - [WM8915_WRITE_SEQUENCER_326] = 0x102, - [WM8915_WRITE_SEQUENCER_328] = 0x1, - 
[WM8915_WRITE_SEQUENCER_330] = 0x106, - [WM8915_WRITE_SEQUENCER_331] = 0x100, - [WM8915_WRITE_SEQUENCER_332] = 0x2fff, - [WM8915_WRITE_SEQUENCER_336] = 0x2fff, - [WM8915_WRITE_SEQUENCER_340] = 0x2fff, - [WM8915_WRITE_SEQUENCER_344] = 0x2fff, - [WM8915_WRITE_SEQUENCER_348] = 0x2fff, - [WM8915_WRITE_SEQUENCER_352] = 0x2fff, - [WM8915_WRITE_SEQUENCER_356] = 0x2fff, - [WM8915_WRITE_SEQUENCER_360] = 0x2fff, - [WM8915_WRITE_SEQUENCER_364] = 0x2fff, - [WM8915_WRITE_SEQUENCER_368] = 0x2fff, - [WM8915_WRITE_SEQUENCER_372] = 0x2fff, - [WM8915_WRITE_SEQUENCER_376] = 0x2fff, - [WM8915_WRITE_SEQUENCER_380] = 0x2fff, - [WM8915_WRITE_SEQUENCER_384] = 0x60, - [WM8915_WRITE_SEQUENCER_386] = 0x601, - [WM8915_WRITE_SEQUENCER_388] = 0x61, - [WM8915_WRITE_SEQUENCER_390] = 0x601, - [WM8915_WRITE_SEQUENCER_392] = 0x50, - [WM8915_WRITE_SEQUENCER_394] = 0x300, - [WM8915_WRITE_SEQUENCER_396] = 0x1, - [WM8915_WRITE_SEQUENCER_398] = 0x304, - [WM8915_WRITE_SEQUENCER_400] = 0x40, - [WM8915_WRITE_SEQUENCER_402] = 0xf, - [WM8915_WRITE_SEQUENCER_404] = 0x1, - [WM8915_WRITE_SEQUENCER_407] = 0x100, -}; - -static const DECLARE_TLV_DB_SCALE(inpga_tlv, 0, 100, 0); -static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0); -static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1); -static const DECLARE_TLV_DB_SCALE(out_digital_tlv, -1200, 150, 0); -static const DECLARE_TLV_DB_SCALE(out_tlv, -900, 75, 0); -static const DECLARE_TLV_DB_SCALE(spk_tlv, -900, 150, 0); -static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); - -static const char *sidetone_hpf_text[] = { - "2.9kHz", "1.5kHz", "735Hz", "403Hz", "196Hz", "98Hz", "49Hz" -}; - -static const struct soc_enum sidetone_hpf = - SOC_ENUM_SINGLE(WM8915_SIDETONE, 7, 6, sidetone_hpf_text); - -static const char *hpf_mode_text[] = { - "HiFi", "Custom", "Voice" -}; - -static const struct soc_enum dsp1tx_hpf_mode = - SOC_ENUM_SINGLE(WM8915_DSP1_TX_FILTERS, 3, 3, hpf_mode_text); - -static const struct soc_enum dsp2tx_hpf_mode = - SOC_ENUM_SINGLE(WM8915_DSP2_TX_FILTERS, 3, 3, hpf_mode_text); - -static const char *hpf_cutoff_text[] = { - "50Hz", "75Hz", "100Hz", "150Hz", "200Hz", "300Hz", "400Hz" -}; - -static const struct soc_enum dsp1tx_hpf_cutoff = - SOC_ENUM_SINGLE(WM8915_DSP1_TX_FILTERS, 0, 7, hpf_cutoff_text); - -static const struct soc_enum dsp2tx_hpf_cutoff = - SOC_ENUM_SINGLE(WM8915_DSP2_TX_FILTERS, 0, 7, hpf_cutoff_text); - -static void wm8915_set_retune_mobile(struct snd_soc_codec *codec, int block) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - struct wm8915_pdata *pdata = &wm8915->pdata; - int base, best, best_val, save, i, cfg, iface; - - if (!wm8915->num_retune_mobile_texts) - return; - - switch (block) { - case 0: - base = WM8915_DSP1_RX_EQ_GAINS_1; - if (snd_soc_read(codec, WM8915_POWER_MANAGEMENT_8) & - WM8915_DSP1RX_SRC) - iface = 1; - else - iface = 0; - break; - case 1: - base = WM8915_DSP1_RX_EQ_GAINS_2; - if (snd_soc_read(codec, WM8915_POWER_MANAGEMENT_8) & - WM8915_DSP2RX_SRC) - iface = 1; - else - iface = 0; - break; - default: - return; - } - - /* Find the version of the currently selected configuration - * with the nearest sample rate. 
*/ - cfg = wm8915->retune_mobile_cfg[block]; - best = 0; - best_val = INT_MAX; - for (i = 0; i < pdata->num_retune_mobile_cfgs; i++) { - if (strcmp(pdata->retune_mobile_cfgs[i].name, - wm8915->retune_mobile_texts[cfg]) == 0 && - abs(pdata->retune_mobile_cfgs[i].rate - - wm8915->rx_rate[iface]) < best_val) { - best = i; - best_val = abs(pdata->retune_mobile_cfgs[i].rate - - wm8915->rx_rate[iface]); - } - } - - dev_dbg(codec->dev, "ReTune Mobile %d %s/%dHz for %dHz sample rate\n", - block, - pdata->retune_mobile_cfgs[best].name, - pdata->retune_mobile_cfgs[best].rate, - wm8915->rx_rate[iface]); - - /* The EQ will be disabled while reconfiguring it, remember the - * current configuration. - */ - save = snd_soc_read(codec, base); - save &= WM8915_DSP1RX_EQ_ENA; - - for (i = 0; i < ARRAY_SIZE(pdata->retune_mobile_cfgs[best].regs); i++) - snd_soc_update_bits(codec, base + i, 0xffff, - pdata->retune_mobile_cfgs[best].regs[i]); - - snd_soc_update_bits(codec, base, WM8915_DSP1RX_EQ_ENA, save); -} - -/* Icky as hell but saves code duplication */ -static int wm8915_get_retune_mobile_block(const char *name) -{ - if (strcmp(name, "DSP1 EQ Mode") == 0) - return 0; - if (strcmp(name, "DSP2 EQ Mode") == 0) - return 1; - return -EINVAL; -} - -static int wm8915_put_retune_mobile_enum(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - struct wm8915_pdata *pdata = &wm8915->pdata; - int block = wm8915_get_retune_mobile_block(kcontrol->id.name); - int value = ucontrol->value.integer.value[0]; - - if (block < 0) - return block; - - if (value >= pdata->num_retune_mobile_cfgs) - return -EINVAL; - - wm8915->retune_mobile_cfg[block] = value; - - wm8915_set_retune_mobile(codec, block); - - return 0; -} - -static int wm8915_get_retune_mobile_enum(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - int block = wm8915_get_retune_mobile_block(kcontrol->id.name); - - ucontrol->value.enumerated.item[0] = wm8915->retune_mobile_cfg[block]; - - return 0; -} - -static const struct snd_kcontrol_new wm8915_snd_controls[] = { -SOC_DOUBLE_R_TLV("Capture Volume", WM8915_LEFT_LINE_INPUT_VOLUME, - WM8915_RIGHT_LINE_INPUT_VOLUME, 0, 31, 0, inpga_tlv), -SOC_DOUBLE_R("Capture ZC Switch", WM8915_LEFT_LINE_INPUT_VOLUME, - WM8915_RIGHT_LINE_INPUT_VOLUME, 5, 1, 0), - -SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8915_DAC1_MIXER_VOLUMES, - 0, 5, 24, 0, sidetone_tlv), -SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8915_DAC2_MIXER_VOLUMES, - 0, 5, 24, 0, sidetone_tlv), -SOC_SINGLE("Sidetone LPF Switch", WM8915_SIDETONE, 12, 1, 0), -SOC_ENUM("Sidetone HPF Cut-off", sidetone_hpf), -SOC_SINGLE("Sidetone HPF Switch", WM8915_SIDETONE, 6, 1, 0), - -SOC_DOUBLE_R_TLV("DSP1 Capture Volume", WM8915_DSP1_TX_LEFT_VOLUME, - WM8915_DSP1_TX_RIGHT_VOLUME, 1, 96, 0, digital_tlv), -SOC_DOUBLE_R_TLV("DSP2 Capture Volume", WM8915_DSP2_TX_LEFT_VOLUME, - WM8915_DSP2_TX_RIGHT_VOLUME, 1, 96, 0, digital_tlv), - -SOC_SINGLE("DSP1 Capture Notch Filter Switch", WM8915_DSP1_TX_FILTERS, - 13, 1, 0), -SOC_DOUBLE("DSP1 Capture HPF Switch", WM8915_DSP1_TX_FILTERS, 12, 11, 1, 0), -SOC_ENUM("DSP1 Capture HPF Mode", dsp1tx_hpf_mode), -SOC_ENUM("DSP1 Capture HPF Cutoff", dsp1tx_hpf_cutoff), - -SOC_SINGLE("DSP2 Capture Notch Filter Switch", WM8915_DSP2_TX_FILTERS, - 13, 1, 0), -SOC_DOUBLE("DSP2 
Capture HPF Switch", WM8915_DSP2_TX_FILTERS, 12, 11, 1, 0), -SOC_ENUM("DSP2 Capture HPF Mode", dsp2tx_hpf_mode), -SOC_ENUM("DSP2 Capture HPF Cutoff", dsp2tx_hpf_cutoff), - -SOC_DOUBLE_R_TLV("DSP1 Playback Volume", WM8915_DSP1_RX_LEFT_VOLUME, - WM8915_DSP1_RX_RIGHT_VOLUME, 1, 112, 0, digital_tlv), -SOC_SINGLE("DSP1 Playback Switch", WM8915_DSP1_RX_FILTERS_1, 9, 1, 1), - -SOC_DOUBLE_R_TLV("DSP2 Playback Volume", WM8915_DSP2_RX_LEFT_VOLUME, - WM8915_DSP2_RX_RIGHT_VOLUME, 1, 112, 0, digital_tlv), -SOC_SINGLE("DSP2 Playback Switch", WM8915_DSP2_RX_FILTERS_1, 9, 1, 1), - -SOC_DOUBLE_R_TLV("DAC1 Volume", WM8915_DAC1_LEFT_VOLUME, - WM8915_DAC1_RIGHT_VOLUME, 1, 112, 0, digital_tlv), -SOC_DOUBLE_R("DAC1 Switch", WM8915_DAC1_LEFT_VOLUME, - WM8915_DAC1_RIGHT_VOLUME, 9, 1, 1), - -SOC_DOUBLE_R_TLV("DAC2 Volume", WM8915_DAC2_LEFT_VOLUME, - WM8915_DAC2_RIGHT_VOLUME, 1, 112, 0, digital_tlv), -SOC_DOUBLE_R("DAC2 Switch", WM8915_DAC2_LEFT_VOLUME, - WM8915_DAC2_RIGHT_VOLUME, 9, 1, 1), - -SOC_SINGLE("Speaker High Performance Switch", WM8915_OVERSAMPLING, 3, 1, 0), -SOC_SINGLE("DMIC High Performance Switch", WM8915_OVERSAMPLING, 2, 1, 0), -SOC_SINGLE("ADC High Performance Switch", WM8915_OVERSAMPLING, 1, 1, 0), -SOC_SINGLE("DAC High Performance Switch", WM8915_OVERSAMPLING, 0, 1, 0), - -SOC_SINGLE("DAC Soft Mute Switch", WM8915_DAC_SOFTMUTE, 1, 1, 0), -SOC_SINGLE("DAC Slow Soft Mute Switch", WM8915_DAC_SOFTMUTE, 0, 1, 0), - -SOC_DOUBLE_TLV("Digital Output 1 Volume", WM8915_DAC1_HPOUT1_VOLUME, 0, 4, - 8, 0, out_digital_tlv), -SOC_DOUBLE_TLV("Digital Output 2 Volume", WM8915_DAC2_HPOUT2_VOLUME, 0, 4, - 8, 0, out_digital_tlv), - -SOC_DOUBLE_R_TLV("Output 1 Volume", WM8915_OUTPUT1_LEFT_VOLUME, - WM8915_OUTPUT1_RIGHT_VOLUME, 0, 12, 0, out_tlv), -SOC_DOUBLE_R("Output 1 ZC Switch", WM8915_OUTPUT1_LEFT_VOLUME, - WM8915_OUTPUT1_RIGHT_VOLUME, 7, 1, 0), - -SOC_DOUBLE_R_TLV("Output 2 Volume", WM8915_OUTPUT2_LEFT_VOLUME, - WM8915_OUTPUT2_RIGHT_VOLUME, 0, 12, 0, out_tlv), -SOC_DOUBLE_R("Output 2 ZC Switch", WM8915_OUTPUT2_LEFT_VOLUME, - WM8915_OUTPUT2_RIGHT_VOLUME, 7, 1, 0), - -SOC_DOUBLE_TLV("Speaker Volume", WM8915_PDM_SPEAKER_VOLUME, 0, 4, 8, 0, - spk_tlv), -SOC_DOUBLE_R("Speaker Switch", WM8915_LEFT_PDM_SPEAKER, - WM8915_RIGHT_PDM_SPEAKER, 3, 1, 1), -SOC_DOUBLE_R("Speaker ZC Switch", WM8915_LEFT_PDM_SPEAKER, - WM8915_RIGHT_PDM_SPEAKER, 2, 1, 0), - -SOC_SINGLE("DSP1 EQ Switch", WM8915_DSP1_RX_EQ_GAINS_1, 0, 1, 0), -SOC_SINGLE("DSP2 EQ Switch", WM8915_DSP2_RX_EQ_GAINS_1, 0, 1, 0), -}; - -static const struct snd_kcontrol_new wm8915_eq_controls[] = { -SOC_SINGLE_TLV("DSP1 EQ B1 Volume", WM8915_DSP1_RX_EQ_GAINS_1, 11, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP1 EQ B2 Volume", WM8915_DSP1_RX_EQ_GAINS_1, 6, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP1 EQ B3 Volume", WM8915_DSP1_RX_EQ_GAINS_1, 1, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP1 EQ B4 Volume", WM8915_DSP1_RX_EQ_GAINS_2, 11, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP1 EQ B5 Volume", WM8915_DSP1_RX_EQ_GAINS_2, 6, 31, 0, - eq_tlv), - -SOC_SINGLE_TLV("DSP2 EQ B1 Volume", WM8915_DSP2_RX_EQ_GAINS_1, 11, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP2 EQ B2 Volume", WM8915_DSP2_RX_EQ_GAINS_1, 6, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP2 EQ B3 Volume", WM8915_DSP2_RX_EQ_GAINS_1, 1, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP2 EQ B4 Volume", WM8915_DSP2_RX_EQ_GAINS_2, 11, 31, 0, - eq_tlv), -SOC_SINGLE_TLV("DSP2 EQ B5 Volume", WM8915_DSP2_RX_EQ_GAINS_2, 6, 31, 0, - eq_tlv), -}; - -static int cp_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - switch (event) { - case 
SND_SOC_DAPM_POST_PMU: - msleep(5); - break; - default: - BUG(); - return -EINVAL; - } - - return 0; -} - -static int rmv_short_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(w->codec); - - /* Record which outputs we enabled */ - switch (event) { - case SND_SOC_DAPM_PRE_PMD: - wm8915->hpout_pending &= ~w->shift; - break; - case SND_SOC_DAPM_PRE_PMU: - wm8915->hpout_pending |= w->shift; - break; - default: - BUG(); - return -EINVAL; - } - - return 0; -} - -static void wait_for_dc_servo(struct snd_soc_codec *codec, u16 mask) -{ - struct i2c_client *i2c = to_i2c_client(codec->dev); - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - int i, ret; - unsigned long timeout = 200; - - snd_soc_write(codec, WM8915_DC_SERVO_2, mask); - - /* Use the interrupt if possible */ - do { - if (i2c->irq) { - timeout = wait_for_completion_timeout(&wm8915->dcs_done, - msecs_to_jiffies(200)); - if (timeout == 0) - dev_err(codec->dev, "DC servo timed out\n"); - - } else { - msleep(1); - if (--i) { - timeout = 0; - break; - } - } - - ret = snd_soc_read(codec, WM8915_DC_SERVO_2); - dev_dbg(codec->dev, "DC servo state: %x\n", ret); - } while (ret & mask); - - if (timeout == 0) - dev_err(codec->dev, "DC servo timed out for %x\n", mask); - else - dev_dbg(codec->dev, "DC servo complete for %x\n", mask); -} - -static void wm8915_seq_notifier(struct snd_soc_dapm_context *dapm, - enum snd_soc_dapm_type event, int subseq) -{ - struct snd_soc_codec *codec = container_of(dapm, - struct snd_soc_codec, dapm); - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - u16 val, mask; - - /* Complete any pending DC servo starts */ - if (wm8915->dcs_pending) { - dev_dbg(codec->dev, "Starting DC servo for %x\n", - wm8915->dcs_pending); - - /* Trigger a startup sequence */ - wait_for_dc_servo(codec, wm8915->dcs_pending - << WM8915_DCS_TRIG_STARTUP_0_SHIFT); - - wm8915->dcs_pending = 0; - } - - if (wm8915->hpout_pending != wm8915->hpout_ena) { - dev_dbg(codec->dev, "Applying RMV_SHORTs %x->%x\n", - wm8915->hpout_ena, wm8915->hpout_pending); - - val = 0; - mask = 0; - if (wm8915->hpout_pending & HPOUT1L) { - val |= WM8915_HPOUT1L_RMV_SHORT; - mask |= WM8915_HPOUT1L_RMV_SHORT; - } else { - mask |= WM8915_HPOUT1L_RMV_SHORT | - WM8915_HPOUT1L_OUTP | - WM8915_HPOUT1L_DLY; - } - - if (wm8915->hpout_pending & HPOUT1R) { - val |= WM8915_HPOUT1R_RMV_SHORT; - mask |= WM8915_HPOUT1R_RMV_SHORT; - } else { - mask |= WM8915_HPOUT1R_RMV_SHORT | - WM8915_HPOUT1R_OUTP | - WM8915_HPOUT1R_DLY; - } - - snd_soc_update_bits(codec, WM8915_ANALOGUE_HP_1, mask, val); - - val = 0; - mask = 0; - if (wm8915->hpout_pending & HPOUT2L) { - val |= WM8915_HPOUT2L_RMV_SHORT; - mask |= WM8915_HPOUT2L_RMV_SHORT; - } else { - mask |= WM8915_HPOUT2L_RMV_SHORT | - WM8915_HPOUT2L_OUTP | - WM8915_HPOUT2L_DLY; - } - - if (wm8915->hpout_pending & HPOUT2R) { - val |= WM8915_HPOUT2R_RMV_SHORT; - mask |= WM8915_HPOUT2R_RMV_SHORT; - } else { - mask |= WM8915_HPOUT2R_RMV_SHORT | - WM8915_HPOUT2R_OUTP | - WM8915_HPOUT2R_DLY; - } - - snd_soc_update_bits(codec, WM8915_ANALOGUE_HP_2, mask, val); - - wm8915->hpout_ena = wm8915->hpout_pending; - } -} - -static int dcs_start(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(w->codec); - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - wm8915->dcs_pending |= 1 << w->shift; - break; - default: - BUG(); - return -EINVAL; - } - - return 0; -} 
- -static const char *sidetone_text[] = { - "IN1", "IN2", -}; - -static const struct soc_enum left_sidetone_enum = - SOC_ENUM_SINGLE(WM8915_SIDETONE, 0, 2, sidetone_text); - -static const struct snd_kcontrol_new left_sidetone = - SOC_DAPM_ENUM("Left Sidetone", left_sidetone_enum); - -static const struct soc_enum right_sidetone_enum = - SOC_ENUM_SINGLE(WM8915_SIDETONE, 1, 2, sidetone_text); - -static const struct snd_kcontrol_new right_sidetone = - SOC_DAPM_ENUM("Right Sidetone", right_sidetone_enum); - -static const char *spk_text[] = { - "DAC1L", "DAC1R", "DAC2L", "DAC2R" -}; - -static const struct soc_enum spkl_enum = - SOC_ENUM_SINGLE(WM8915_LEFT_PDM_SPEAKER, 0, 4, spk_text); - -static const struct snd_kcontrol_new spkl_mux = - SOC_DAPM_ENUM("SPKL", spkl_enum); - -static const struct soc_enum spkr_enum = - SOC_ENUM_SINGLE(WM8915_RIGHT_PDM_SPEAKER, 0, 4, spk_text); - -static const struct snd_kcontrol_new spkr_mux = - SOC_DAPM_ENUM("SPKR", spkr_enum); - -static const char *dsp1rx_text[] = { - "AIF1", "AIF2" -}; - -static const struct soc_enum dsp1rx_enum = - SOC_ENUM_SINGLE(WM8915_POWER_MANAGEMENT_8, 0, 2, dsp1rx_text); - -static const struct snd_kcontrol_new dsp1rx = - SOC_DAPM_ENUM("DSP1RX", dsp1rx_enum); - -static const char *dsp2rx_text[] = { - "AIF2", "AIF1" -}; - -static const struct soc_enum dsp2rx_enum = - SOC_ENUM_SINGLE(WM8915_POWER_MANAGEMENT_8, 4, 2, dsp2rx_text); - -static const struct snd_kcontrol_new dsp2rx = - SOC_DAPM_ENUM("DSP2RX", dsp2rx_enum); - -static const char *aif2tx_text[] = { - "DSP2", "DSP1", "AIF1" -}; - -static const struct soc_enum aif2tx_enum = - SOC_ENUM_SINGLE(WM8915_POWER_MANAGEMENT_8, 6, 3, aif2tx_text); - -static const struct snd_kcontrol_new aif2tx = - SOC_DAPM_ENUM("AIF2TX", aif2tx_enum); - -static const char *inmux_text[] = { - "ADC", "DMIC1", "DMIC2" -}; - -static const struct soc_enum in1_enum = - SOC_ENUM_SINGLE(WM8915_POWER_MANAGEMENT_7, 0, 3, inmux_text); - -static const struct snd_kcontrol_new in1_mux = - SOC_DAPM_ENUM("IN1 Mux", in1_enum); - -static const struct soc_enum in2_enum = - SOC_ENUM_SINGLE(WM8915_POWER_MANAGEMENT_7, 4, 3, inmux_text); - -static const struct snd_kcontrol_new in2_mux = - SOC_DAPM_ENUM("IN2 Mux", in2_enum); - -static const struct snd_kcontrol_new dac2r_mix[] = { -SOC_DAPM_SINGLE("Right Sidetone Switch", WM8915_DAC2_RIGHT_MIXER_ROUTING, - 5, 1, 0), -SOC_DAPM_SINGLE("Left Sidetone Switch", WM8915_DAC2_RIGHT_MIXER_ROUTING, - 4, 1, 0), -SOC_DAPM_SINGLE("DSP2 Switch", WM8915_DAC2_RIGHT_MIXER_ROUTING, 1, 1, 0), -SOC_DAPM_SINGLE("DSP1 Switch", WM8915_DAC2_RIGHT_MIXER_ROUTING, 0, 1, 0), -}; - -static const struct snd_kcontrol_new dac2l_mix[] = { -SOC_DAPM_SINGLE("Right Sidetone Switch", WM8915_DAC2_LEFT_MIXER_ROUTING, - 5, 1, 0), -SOC_DAPM_SINGLE("Left Sidetone Switch", WM8915_DAC2_LEFT_MIXER_ROUTING, - 4, 1, 0), -SOC_DAPM_SINGLE("DSP2 Switch", WM8915_DAC2_LEFT_MIXER_ROUTING, 1, 1, 0), -SOC_DAPM_SINGLE("DSP1 Switch", WM8915_DAC2_LEFT_MIXER_ROUTING, 0, 1, 0), -}; - -static const struct snd_kcontrol_new dac1r_mix[] = { -SOC_DAPM_SINGLE("Right Sidetone Switch", WM8915_DAC1_RIGHT_MIXER_ROUTING, - 5, 1, 0), -SOC_DAPM_SINGLE("Left Sidetone Switch", WM8915_DAC1_RIGHT_MIXER_ROUTING, - 4, 1, 0), -SOC_DAPM_SINGLE("DSP2 Switch", WM8915_DAC1_RIGHT_MIXER_ROUTING, 1, 1, 0), -SOC_DAPM_SINGLE("DSP1 Switch", WM8915_DAC1_RIGHT_MIXER_ROUTING, 0, 1, 0), -}; - -static const struct snd_kcontrol_new dac1l_mix[] = { -SOC_DAPM_SINGLE("Right Sidetone Switch", WM8915_DAC1_LEFT_MIXER_ROUTING, - 5, 1, 0), -SOC_DAPM_SINGLE("Left Sidetone Switch", 
WM8915_DAC1_LEFT_MIXER_ROUTING, - 4, 1, 0), -SOC_DAPM_SINGLE("DSP2 Switch", WM8915_DAC1_LEFT_MIXER_ROUTING, 1, 1, 0), -SOC_DAPM_SINGLE("DSP1 Switch", WM8915_DAC1_LEFT_MIXER_ROUTING, 0, 1, 0), -}; - -static const struct snd_kcontrol_new dsp1txl[] = { -SOC_DAPM_SINGLE("IN1 Switch", WM8915_DSP1_TX_LEFT_MIXER_ROUTING, - 1, 1, 0), -SOC_DAPM_SINGLE("DAC Switch", WM8915_DSP1_TX_LEFT_MIXER_ROUTING, - 0, 1, 0), -}; - -static const struct snd_kcontrol_new dsp1txr[] = { -SOC_DAPM_SINGLE("IN1 Switch", WM8915_DSP1_TX_RIGHT_MIXER_ROUTING, - 1, 1, 0), -SOC_DAPM_SINGLE("DAC Switch", WM8915_DSP1_TX_RIGHT_MIXER_ROUTING, - 0, 1, 0), -}; - -static const struct snd_kcontrol_new dsp2txl[] = { -SOC_DAPM_SINGLE("IN1 Switch", WM8915_DSP2_TX_LEFT_MIXER_ROUTING, - 1, 1, 0), -SOC_DAPM_SINGLE("DAC Switch", WM8915_DSP2_TX_LEFT_MIXER_ROUTING, - 0, 1, 0), -}; - -static const struct snd_kcontrol_new dsp2txr[] = { -SOC_DAPM_SINGLE("IN1 Switch", WM8915_DSP2_TX_RIGHT_MIXER_ROUTING, - 1, 1, 0), -SOC_DAPM_SINGLE("DAC Switch", WM8915_DSP2_TX_RIGHT_MIXER_ROUTING, - 0, 1, 0), -}; - - -static const struct snd_soc_dapm_widget wm8915_dapm_widgets[] = { -SND_SOC_DAPM_INPUT("IN1LN"), -SND_SOC_DAPM_INPUT("IN1LP"), -SND_SOC_DAPM_INPUT("IN1RN"), -SND_SOC_DAPM_INPUT("IN1RP"), - -SND_SOC_DAPM_INPUT("IN2LN"), -SND_SOC_DAPM_INPUT("IN2LP"), -SND_SOC_DAPM_INPUT("IN2RN"), -SND_SOC_DAPM_INPUT("IN2RP"), - -SND_SOC_DAPM_INPUT("DMIC1DAT"), -SND_SOC_DAPM_INPUT("DMIC2DAT"), - -SND_SOC_DAPM_SUPPLY_S("SYSCLK", 1, WM8915_AIF_CLOCKING_1, 0, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY_S("SYSDSPCLK", 2, WM8915_CLOCKING_1, 1, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY_S("AIFCLK", 2, WM8915_CLOCKING_1, 2, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY_S("Charge Pump", 2, WM8915_CHARGE_PUMP_1, 15, 0, cp_event, - SND_SOC_DAPM_POST_PMU), - -SND_SOC_DAPM_SUPPLY("LDO2", WM8915_POWER_MANAGEMENT_2, 1, 0, NULL, 0), -SND_SOC_DAPM_MICBIAS("MICB2", WM8915_POWER_MANAGEMENT_1, 9, 0), -SND_SOC_DAPM_MICBIAS("MICB1", WM8915_POWER_MANAGEMENT_1, 8, 0), - -SND_SOC_DAPM_PGA("IN1L PGA", WM8915_POWER_MANAGEMENT_2, 5, 0, NULL, 0), -SND_SOC_DAPM_PGA("IN1R PGA", WM8915_POWER_MANAGEMENT_2, 4, 0, NULL, 0), - -SND_SOC_DAPM_MUX("IN1L Mux", SND_SOC_NOPM, 0, 0, &in1_mux), -SND_SOC_DAPM_MUX("IN1R Mux", SND_SOC_NOPM, 0, 0, &in1_mux), -SND_SOC_DAPM_MUX("IN2L Mux", SND_SOC_NOPM, 0, 0, &in2_mux), -SND_SOC_DAPM_MUX("IN2R Mux", SND_SOC_NOPM, 0, 0, &in2_mux), - -SND_SOC_DAPM_PGA("IN1L", WM8915_POWER_MANAGEMENT_7, 2, 0, NULL, 0), -SND_SOC_DAPM_PGA("IN1R", WM8915_POWER_MANAGEMENT_7, 3, 0, NULL, 0), -SND_SOC_DAPM_PGA("IN2L", WM8915_POWER_MANAGEMENT_7, 6, 0, NULL, 0), -SND_SOC_DAPM_PGA("IN2R", WM8915_POWER_MANAGEMENT_7, 7, 0, NULL, 0), - -SND_SOC_DAPM_SUPPLY("DMIC2", WM8915_POWER_MANAGEMENT_7, 9, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("DMIC1", WM8915_POWER_MANAGEMENT_7, 8, 0, NULL, 0), - -SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8915_POWER_MANAGEMENT_3, 5, 0), -SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8915_POWER_MANAGEMENT_3, 4, 0), -SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8915_POWER_MANAGEMENT_3, 3, 0), -SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8915_POWER_MANAGEMENT_3, 2, 0), - -SND_SOC_DAPM_ADC("ADCL", NULL, WM8915_POWER_MANAGEMENT_3, 1, 0), -SND_SOC_DAPM_ADC("ADCR", NULL, WM8915_POWER_MANAGEMENT_3, 0, 0), - -SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &left_sidetone), -SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &right_sidetone), - -SND_SOC_DAPM_AIF_IN("DSP2RXL", NULL, 0, WM8915_POWER_MANAGEMENT_3, 11, 0), -SND_SOC_DAPM_AIF_IN("DSP2RXR", NULL, 1, WM8915_POWER_MANAGEMENT_3, 10, 0), -SND_SOC_DAPM_AIF_IN("DSP1RXL", NULL, 0, 
WM8915_POWER_MANAGEMENT_3, 9, 0), -SND_SOC_DAPM_AIF_IN("DSP1RXR", NULL, 1, WM8915_POWER_MANAGEMENT_3, 8, 0), - -SND_SOC_DAPM_MIXER("DSP2TXL", WM8915_POWER_MANAGEMENT_5, 11, 0, - dsp2txl, ARRAY_SIZE(dsp2txl)), -SND_SOC_DAPM_MIXER("DSP2TXR", WM8915_POWER_MANAGEMENT_5, 10, 0, - dsp2txr, ARRAY_SIZE(dsp2txr)), -SND_SOC_DAPM_MIXER("DSP1TXL", WM8915_POWER_MANAGEMENT_5, 9, 0, - dsp1txl, ARRAY_SIZE(dsp1txl)), -SND_SOC_DAPM_MIXER("DSP1TXR", WM8915_POWER_MANAGEMENT_5, 8, 0, - dsp1txr, ARRAY_SIZE(dsp1txr)), - -SND_SOC_DAPM_MIXER("DAC2L Mixer", SND_SOC_NOPM, 0, 0, - dac2l_mix, ARRAY_SIZE(dac2l_mix)), -SND_SOC_DAPM_MIXER("DAC2R Mixer", SND_SOC_NOPM, 0, 0, - dac2r_mix, ARRAY_SIZE(dac2r_mix)), -SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, - dac1l_mix, ARRAY_SIZE(dac1l_mix)), -SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, - dac1r_mix, ARRAY_SIZE(dac1r_mix)), - -SND_SOC_DAPM_DAC("DAC2L", NULL, WM8915_POWER_MANAGEMENT_5, 3, 0), -SND_SOC_DAPM_DAC("DAC2R", NULL, WM8915_POWER_MANAGEMENT_5, 2, 0), -SND_SOC_DAPM_DAC("DAC1L", NULL, WM8915_POWER_MANAGEMENT_5, 1, 0), -SND_SOC_DAPM_DAC("DAC1R", NULL, WM8915_POWER_MANAGEMENT_5, 0, 0), - -SND_SOC_DAPM_AIF_IN("AIF2RX1", "AIF2 Playback", 1, - WM8915_POWER_MANAGEMENT_4, 9, 0), -SND_SOC_DAPM_AIF_IN("AIF2RX0", "AIF2 Playback", 2, - WM8915_POWER_MANAGEMENT_4, 8, 0), - -SND_SOC_DAPM_AIF_IN("AIF2TX1", "AIF2 Capture", 1, - WM8915_POWER_MANAGEMENT_6, 9, 0), -SND_SOC_DAPM_AIF_IN("AIF2TX0", "AIF2 Capture", 2, - WM8915_POWER_MANAGEMENT_6, 8, 0), - -SND_SOC_DAPM_AIF_IN("AIF1RX5", "AIF1 Playback", 5, - WM8915_POWER_MANAGEMENT_4, 5, 0), -SND_SOC_DAPM_AIF_IN("AIF1RX4", "AIF1 Playback", 4, - WM8915_POWER_MANAGEMENT_4, 4, 0), -SND_SOC_DAPM_AIF_IN("AIF1RX3", "AIF1 Playback", 3, - WM8915_POWER_MANAGEMENT_4, 3, 0), -SND_SOC_DAPM_AIF_IN("AIF1RX2", "AIF1 Playback", 2, - WM8915_POWER_MANAGEMENT_4, 2, 0), -SND_SOC_DAPM_AIF_IN("AIF1RX1", "AIF1 Playback", 1, - WM8915_POWER_MANAGEMENT_4, 1, 0), -SND_SOC_DAPM_AIF_IN("AIF1RX0", "AIF1 Playback", 0, - WM8915_POWER_MANAGEMENT_4, 0, 0), - -SND_SOC_DAPM_AIF_OUT("AIF1TX5", "AIF1 Capture", 5, - WM8915_POWER_MANAGEMENT_6, 5, 0), -SND_SOC_DAPM_AIF_OUT("AIF1TX4", "AIF1 Capture", 4, - WM8915_POWER_MANAGEMENT_6, 4, 0), -SND_SOC_DAPM_AIF_OUT("AIF1TX3", "AIF1 Capture", 3, - WM8915_POWER_MANAGEMENT_6, 3, 0), -SND_SOC_DAPM_AIF_OUT("AIF1TX2", "AIF1 Capture", 2, - WM8915_POWER_MANAGEMENT_6, 2, 0), -SND_SOC_DAPM_AIF_OUT("AIF1TX1", "AIF1 Capture", 1, - WM8915_POWER_MANAGEMENT_6, 1, 0), -SND_SOC_DAPM_AIF_OUT("AIF1TX0", "AIF1 Capture", 0, - WM8915_POWER_MANAGEMENT_6, 0, 0), - -/* We route as stereo pairs so define some dummy widgets to squash - * things down for now. 
RXA = 0,1, RXB = 2,3 and so on */ -SND_SOC_DAPM_PGA("AIF1RXA", SND_SOC_NOPM, 0, 0, NULL, 0), -SND_SOC_DAPM_PGA("AIF1RXB", SND_SOC_NOPM, 0, 0, NULL, 0), -SND_SOC_DAPM_PGA("AIF1RXC", SND_SOC_NOPM, 0, 0, NULL, 0), -SND_SOC_DAPM_PGA("AIF2RX", SND_SOC_NOPM, 0, 0, NULL, 0), -SND_SOC_DAPM_PGA("DSP2TX", SND_SOC_NOPM, 0, 0, NULL, 0), - -SND_SOC_DAPM_MUX("DSP1RX", SND_SOC_NOPM, 0, 0, &dsp1rx), -SND_SOC_DAPM_MUX("DSP2RX", SND_SOC_NOPM, 0, 0, &dsp2rx), -SND_SOC_DAPM_MUX("AIF2TX", SND_SOC_NOPM, 0, 0, &aif2tx), - -SND_SOC_DAPM_MUX("SPKL", SND_SOC_NOPM, 0, 0, &spkl_mux), -SND_SOC_DAPM_MUX("SPKR", SND_SOC_NOPM, 0, 0, &spkr_mux), -SND_SOC_DAPM_PGA("SPKL PGA", WM8915_LEFT_PDM_SPEAKER, 4, 0, NULL, 0), -SND_SOC_DAPM_PGA("SPKR PGA", WM8915_RIGHT_PDM_SPEAKER, 4, 0, NULL, 0), - -SND_SOC_DAPM_PGA_S("HPOUT2L PGA", 0, WM8915_POWER_MANAGEMENT_1, 7, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT2L_DLY", 1, WM8915_ANALOGUE_HP_2, 5, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT2L_DCS", 2, WM8915_DC_SERVO_1, 2, 0, dcs_start, - SND_SOC_DAPM_POST_PMU), -SND_SOC_DAPM_PGA_S("HPOUT2L_OUTP", 3, WM8915_ANALOGUE_HP_2, 6, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT2L_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT2L, 0, - rmv_short_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), - -SND_SOC_DAPM_PGA_S("HPOUT2R PGA", 0, WM8915_POWER_MANAGEMENT_1, 6, 0,NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT2R_DLY", 1, WM8915_ANALOGUE_HP_2, 1, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT2R_DCS", 2, WM8915_DC_SERVO_1, 3, 0, dcs_start, - SND_SOC_DAPM_POST_PMU), -SND_SOC_DAPM_PGA_S("HPOUT2R_OUTP", 3, WM8915_ANALOGUE_HP_2, 2, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT2R_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT2R, 0, - rmv_short_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), - -SND_SOC_DAPM_PGA_S("HPOUT1L PGA", 0, WM8915_POWER_MANAGEMENT_1, 5, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT1L_DLY", 1, WM8915_ANALOGUE_HP_1, 5, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT1L_DCS", 2, WM8915_DC_SERVO_1, 0, 0, dcs_start, - SND_SOC_DAPM_POST_PMU), -SND_SOC_DAPM_PGA_S("HPOUT1L_OUTP", 3, WM8915_ANALOGUE_HP_1, 6, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT1L_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT1L, 0, - rmv_short_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), - -SND_SOC_DAPM_PGA_S("HPOUT1R PGA", 0, WM8915_POWER_MANAGEMENT_1, 4, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT1R_DLY", 1, WM8915_ANALOGUE_HP_1, 1, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT1R_DCS", 2, WM8915_DC_SERVO_1, 1, 0, dcs_start, - SND_SOC_DAPM_POST_PMU), -SND_SOC_DAPM_PGA_S("HPOUT1R_OUTP", 3, WM8915_ANALOGUE_HP_1, 2, 0, NULL, 0), -SND_SOC_DAPM_PGA_S("HPOUT1R_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT1R, 0, - rmv_short_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), - -SND_SOC_DAPM_OUTPUT("HPOUT1L"), -SND_SOC_DAPM_OUTPUT("HPOUT1R"), -SND_SOC_DAPM_OUTPUT("HPOUT2L"), -SND_SOC_DAPM_OUTPUT("HPOUT2R"), -SND_SOC_DAPM_OUTPUT("SPKDAT"), -}; - -static const struct snd_soc_dapm_route wm8915_dapm_routes[] = { - { "AIFCLK", NULL, "SYSCLK" }, - { "SYSDSPCLK", NULL, "SYSCLK" }, - { "Charge Pump", NULL, "SYSCLK" }, - - { "MICB1", NULL, "LDO2" }, - { "MICB2", NULL, "LDO2" }, - - { "IN1L PGA", NULL, "IN2LN" }, - { "IN1L PGA", NULL, "IN2LP" }, - { "IN1L PGA", NULL, "IN1LN" }, - { "IN1L PGA", NULL, "IN1LP" }, - - { "IN1R PGA", NULL, "IN2RN" }, - { "IN1R PGA", NULL, "IN2RP" }, - { "IN1R PGA", NULL, "IN1RN" }, - { "IN1R PGA", NULL, "IN1RP" }, - - { "ADCL", NULL, "IN1L PGA" }, - - { "ADCR", NULL, "IN1R PGA" }, - - { "DMIC1L", NULL, "DMIC1DAT" }, - { "DMIC1R", NULL, "DMIC1DAT" }, - { "DMIC2L", NULL, "DMIC2DAT" }, - { "DMIC2R", NULL, "DMIC2DAT" }, - - { "DMIC2L", NULL, 
"DMIC2" }, - { "DMIC2R", NULL, "DMIC2" }, - { "DMIC1L", NULL, "DMIC1" }, - { "DMIC1R", NULL, "DMIC1" }, - - { "IN1L Mux", "ADC", "ADCL" }, - { "IN1L Mux", "DMIC1", "DMIC1L" }, - { "IN1L Mux", "DMIC2", "DMIC2L" }, - - { "IN1R Mux", "ADC", "ADCR" }, - { "IN1R Mux", "DMIC1", "DMIC1R" }, - { "IN1R Mux", "DMIC2", "DMIC2R" }, - - { "IN2L Mux", "ADC", "ADCL" }, - { "IN2L Mux", "DMIC1", "DMIC1L" }, - { "IN2L Mux", "DMIC2", "DMIC2L" }, - - { "IN2R Mux", "ADC", "ADCR" }, - { "IN2R Mux", "DMIC1", "DMIC1R" }, - { "IN2R Mux", "DMIC2", "DMIC2R" }, - - { "Left Sidetone", "IN1", "IN1L Mux" }, - { "Left Sidetone", "IN2", "IN2L Mux" }, - - { "Right Sidetone", "IN1", "IN1R Mux" }, - { "Right Sidetone", "IN2", "IN2R Mux" }, - - { "DSP1TXL", "IN1 Switch", "IN1L Mux" }, - { "DSP1TXR", "IN1 Switch", "IN1R Mux" }, - - { "DSP2TXL", "IN1 Switch", "IN2L Mux" }, - { "DSP2TXR", "IN1 Switch", "IN2R Mux" }, - - { "AIF1TX0", NULL, "DSP1TXL" }, - { "AIF1TX1", NULL, "DSP1TXR" }, - { "AIF1TX2", NULL, "DSP2TXL" }, - { "AIF1TX3", NULL, "DSP2TXR" }, - { "AIF1TX4", NULL, "AIF2RX0" }, - { "AIF1TX5", NULL, "AIF2RX1" }, - - { "AIF1RX0", NULL, "AIFCLK" }, - { "AIF1RX1", NULL, "AIFCLK" }, - { "AIF1RX2", NULL, "AIFCLK" }, - { "AIF1RX3", NULL, "AIFCLK" }, - { "AIF1RX4", NULL, "AIFCLK" }, - { "AIF1RX5", NULL, "AIFCLK" }, - - { "AIF2RX0", NULL, "AIFCLK" }, - { "AIF2RX1", NULL, "AIFCLK" }, - - { "DSP1RXL", NULL, "SYSDSPCLK" }, - { "DSP1RXR", NULL, "SYSDSPCLK" }, - { "DSP2RXL", NULL, "SYSDSPCLK" }, - { "DSP2RXR", NULL, "SYSDSPCLK" }, - { "DSP1TXL", NULL, "SYSDSPCLK" }, - { "DSP1TXR", NULL, "SYSDSPCLK" }, - { "DSP2TXL", NULL, "SYSDSPCLK" }, - { "DSP2TXR", NULL, "SYSDSPCLK" }, - - { "AIF1RXA", NULL, "AIF1RX0" }, - { "AIF1RXA", NULL, "AIF1RX1" }, - { "AIF1RXB", NULL, "AIF1RX2" }, - { "AIF1RXB", NULL, "AIF1RX3" }, - { "AIF1RXC", NULL, "AIF1RX4" }, - { "AIF1RXC", NULL, "AIF1RX5" }, - - { "AIF2RX", NULL, "AIF2RX0" }, - { "AIF2RX", NULL, "AIF2RX1" }, - - { "AIF2TX", "DSP2", "DSP2TX" }, - { "AIF2TX", "DSP1", "DSP1RX" }, - { "AIF2TX", "AIF1", "AIF1RXC" }, - - { "DSP1RXL", NULL, "DSP1RX" }, - { "DSP1RXR", NULL, "DSP1RX" }, - { "DSP2RXL", NULL, "DSP2RX" }, - { "DSP2RXR", NULL, "DSP2RX" }, - - { "DSP2TX", NULL, "DSP2TXL" }, - { "DSP2TX", NULL, "DSP2TXR" }, - - { "DSP1RX", "AIF1", "AIF1RXA" }, - { "DSP1RX", "AIF2", "AIF2RX" }, - - { "DSP2RX", "AIF1", "AIF1RXB" }, - { "DSP2RX", "AIF2", "AIF2RX" }, - - { "DAC2L Mixer", "DSP2 Switch", "DSP2RXL" }, - { "DAC2L Mixer", "DSP1 Switch", "DSP1RXL" }, - { "DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, - { "DAC2L Mixer", "Left Sidetone Switch", "Left Sidetone" }, - - { "DAC2R Mixer", "DSP2 Switch", "DSP2RXR" }, - { "DAC2R Mixer", "DSP1 Switch", "DSP1RXR" }, - { "DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" }, - { "DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" }, - - { "DAC1L Mixer", "DSP2 Switch", "DSP2RXL" }, - { "DAC1L Mixer", "DSP1 Switch", "DSP1RXL" }, - { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, - { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, - - { "DAC1R Mixer", "DSP2 Switch", "DSP2RXR" }, - { "DAC1R Mixer", "DSP1 Switch", "DSP1RXR" }, - { "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" }, - { "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" }, - - { "DAC1L", NULL, "DAC1L Mixer" }, - { "DAC1R", NULL, "DAC1R Mixer" }, - { "DAC2L", NULL, "DAC2L Mixer" }, - { "DAC2R", NULL, "DAC2R Mixer" }, - - { "HPOUT2L PGA", NULL, "Charge Pump" }, - { "HPOUT2L PGA", NULL, "DAC2L" }, - { "HPOUT2L_DLY", NULL, "HPOUT2L PGA" }, - { "HPOUT2L_DCS", 
NULL, "HPOUT2L_DLY" }, - { "HPOUT2L_OUTP", NULL, "HPOUT2L_DCS" }, - { "HPOUT2L_RMV_SHORT", NULL, "HPOUT2L_OUTP" }, - - { "HPOUT2R PGA", NULL, "Charge Pump" }, - { "HPOUT2R PGA", NULL, "DAC2R" }, - { "HPOUT2R_DLY", NULL, "HPOUT2R PGA" }, - { "HPOUT2R_DCS", NULL, "HPOUT2R_DLY" }, - { "HPOUT2R_OUTP", NULL, "HPOUT2R_DCS" }, - { "HPOUT2R_RMV_SHORT", NULL, "HPOUT2R_OUTP" }, - - { "HPOUT1L PGA", NULL, "Charge Pump" }, - { "HPOUT1L PGA", NULL, "DAC1L" }, - { "HPOUT1L_DLY", NULL, "HPOUT1L PGA" }, - { "HPOUT1L_DCS", NULL, "HPOUT1L_DLY" }, - { "HPOUT1L_OUTP", NULL, "HPOUT1L_DCS" }, - { "HPOUT1L_RMV_SHORT", NULL, "HPOUT1L_OUTP" }, - - { "HPOUT1R PGA", NULL, "Charge Pump" }, - { "HPOUT1R PGA", NULL, "DAC1R" }, - { "HPOUT1R_DLY", NULL, "HPOUT1R PGA" }, - { "HPOUT1R_DCS", NULL, "HPOUT1R_DLY" }, - { "HPOUT1R_OUTP", NULL, "HPOUT1R_DCS" }, - { "HPOUT1R_RMV_SHORT", NULL, "HPOUT1R_OUTP" }, - - { "HPOUT2L", NULL, "HPOUT2L_RMV_SHORT" }, - { "HPOUT2R", NULL, "HPOUT2R_RMV_SHORT" }, - { "HPOUT1L", NULL, "HPOUT1L_RMV_SHORT" }, - { "HPOUT1R", NULL, "HPOUT1R_RMV_SHORT" }, - - { "SPKL", "DAC1L", "DAC1L" }, - { "SPKL", "DAC1R", "DAC1R" }, - { "SPKL", "DAC2L", "DAC2L" }, - { "SPKL", "DAC2R", "DAC2R" }, - - { "SPKR", "DAC1L", "DAC1L" }, - { "SPKR", "DAC1R", "DAC1R" }, - { "SPKR", "DAC2L", "DAC2L" }, - { "SPKR", "DAC2R", "DAC2R" }, - - { "SPKL PGA", NULL, "SPKL" }, - { "SPKR PGA", NULL, "SPKR" }, - - { "SPKDAT", NULL, "SPKL PGA" }, - { "SPKDAT", NULL, "SPKR PGA" }, -}; - -static int wm8915_readable_register(struct snd_soc_codec *codec, - unsigned int reg) -{ - /* Due to the sparseness of the register map the compiler - * output from an explicit switch statement ends up being much - * more efficient than a table. - */ - switch (reg) { - case WM8915_SOFTWARE_RESET: - case WM8915_POWER_MANAGEMENT_1: - case WM8915_POWER_MANAGEMENT_2: - case WM8915_POWER_MANAGEMENT_3: - case WM8915_POWER_MANAGEMENT_4: - case WM8915_POWER_MANAGEMENT_5: - case WM8915_POWER_MANAGEMENT_6: - case WM8915_POWER_MANAGEMENT_7: - case WM8915_POWER_MANAGEMENT_8: - case WM8915_LEFT_LINE_INPUT_VOLUME: - case WM8915_RIGHT_LINE_INPUT_VOLUME: - case WM8915_LINE_INPUT_CONTROL: - case WM8915_DAC1_HPOUT1_VOLUME: - case WM8915_DAC2_HPOUT2_VOLUME: - case WM8915_DAC1_LEFT_VOLUME: - case WM8915_DAC1_RIGHT_VOLUME: - case WM8915_DAC2_LEFT_VOLUME: - case WM8915_DAC2_RIGHT_VOLUME: - case WM8915_OUTPUT1_LEFT_VOLUME: - case WM8915_OUTPUT1_RIGHT_VOLUME: - case WM8915_OUTPUT2_LEFT_VOLUME: - case WM8915_OUTPUT2_RIGHT_VOLUME: - case WM8915_MICBIAS_1: - case WM8915_MICBIAS_2: - case WM8915_LDO_1: - case WM8915_LDO_2: - case WM8915_ACCESSORY_DETECT_MODE_1: - case WM8915_ACCESSORY_DETECT_MODE_2: - case WM8915_HEADPHONE_DETECT_1: - case WM8915_HEADPHONE_DETECT_2: - case WM8915_MIC_DETECT_1: - case WM8915_MIC_DETECT_2: - case WM8915_MIC_DETECT_3: - case WM8915_CHARGE_PUMP_1: - case WM8915_CHARGE_PUMP_2: - case WM8915_DC_SERVO_1: - case WM8915_DC_SERVO_2: - case WM8915_DC_SERVO_3: - case WM8915_DC_SERVO_5: - case WM8915_DC_SERVO_6: - case WM8915_DC_SERVO_7: - case WM8915_DC_SERVO_READBACK_0: - case WM8915_ANALOGUE_HP_1: - case WM8915_ANALOGUE_HP_2: - case WM8915_CHIP_REVISION: - case WM8915_CONTROL_INTERFACE_1: - case WM8915_WRITE_SEQUENCER_CTRL_1: - case WM8915_WRITE_SEQUENCER_CTRL_2: - case WM8915_AIF_CLOCKING_1: - case WM8915_AIF_CLOCKING_2: - case WM8915_CLOCKING_1: - case WM8915_CLOCKING_2: - case WM8915_AIF_RATE: - case WM8915_FLL_CONTROL_1: - case WM8915_FLL_CONTROL_2: - case WM8915_FLL_CONTROL_3: - case WM8915_FLL_CONTROL_4: - case WM8915_FLL_CONTROL_5: - case 
WM8915_FLL_CONTROL_6: - case WM8915_FLL_EFS_1: - case WM8915_FLL_EFS_2: - case WM8915_AIF1_CONTROL: - case WM8915_AIF1_BCLK: - case WM8915_AIF1_TX_LRCLK_1: - case WM8915_AIF1_TX_LRCLK_2: - case WM8915_AIF1_RX_LRCLK_1: - case WM8915_AIF1_RX_LRCLK_2: - case WM8915_AIF1TX_DATA_CONFIGURATION_1: - case WM8915_AIF1TX_DATA_CONFIGURATION_2: - case WM8915_AIF1RX_DATA_CONFIGURATION: - case WM8915_AIF1TX_CHANNEL_0_CONFIGURATION: - case WM8915_AIF1TX_CHANNEL_1_CONFIGURATION: - case WM8915_AIF1TX_CHANNEL_2_CONFIGURATION: - case WM8915_AIF1TX_CHANNEL_3_CONFIGURATION: - case WM8915_AIF1TX_CHANNEL_4_CONFIGURATION: - case WM8915_AIF1TX_CHANNEL_5_CONFIGURATION: - case WM8915_AIF1RX_CHANNEL_0_CONFIGURATION: - case WM8915_AIF1RX_CHANNEL_1_CONFIGURATION: - case WM8915_AIF1RX_CHANNEL_2_CONFIGURATION: - case WM8915_AIF1RX_CHANNEL_3_CONFIGURATION: - case WM8915_AIF1RX_CHANNEL_4_CONFIGURATION: - case WM8915_AIF1RX_CHANNEL_5_CONFIGURATION: - case WM8915_AIF1RX_MONO_CONFIGURATION: - case WM8915_AIF1TX_TEST: - case WM8915_AIF2_CONTROL: - case WM8915_AIF2_BCLK: - case WM8915_AIF2_TX_LRCLK_1: - case WM8915_AIF2_TX_LRCLK_2: - case WM8915_AIF2_RX_LRCLK_1: - case WM8915_AIF2_RX_LRCLK_2: - case WM8915_AIF2TX_DATA_CONFIGURATION_1: - case WM8915_AIF2TX_DATA_CONFIGURATION_2: - case WM8915_AIF2RX_DATA_CONFIGURATION: - case WM8915_AIF2TX_CHANNEL_0_CONFIGURATION: - case WM8915_AIF2TX_CHANNEL_1_CONFIGURATION: - case WM8915_AIF2RX_CHANNEL_0_CONFIGURATION: - case WM8915_AIF2RX_CHANNEL_1_CONFIGURATION: - case WM8915_AIF2RX_MONO_CONFIGURATION: - case WM8915_AIF2TX_TEST: - case WM8915_DSP1_TX_LEFT_VOLUME: - case WM8915_DSP1_TX_RIGHT_VOLUME: - case WM8915_DSP1_RX_LEFT_VOLUME: - case WM8915_DSP1_RX_RIGHT_VOLUME: - case WM8915_DSP1_TX_FILTERS: - case WM8915_DSP1_RX_FILTERS_1: - case WM8915_DSP1_RX_FILTERS_2: - case WM8915_DSP1_DRC_1: - case WM8915_DSP1_DRC_2: - case WM8915_DSP1_DRC_3: - case WM8915_DSP1_DRC_4: - case WM8915_DSP1_DRC_5: - case WM8915_DSP1_RX_EQ_GAINS_1: - case WM8915_DSP1_RX_EQ_GAINS_2: - case WM8915_DSP1_RX_EQ_BAND_1_A: - case WM8915_DSP1_RX_EQ_BAND_1_B: - case WM8915_DSP1_RX_EQ_BAND_1_PG: - case WM8915_DSP1_RX_EQ_BAND_2_A: - case WM8915_DSP1_RX_EQ_BAND_2_B: - case WM8915_DSP1_RX_EQ_BAND_2_C: - case WM8915_DSP1_RX_EQ_BAND_2_PG: - case WM8915_DSP1_RX_EQ_BAND_3_A: - case WM8915_DSP1_RX_EQ_BAND_3_B: - case WM8915_DSP1_RX_EQ_BAND_3_C: - case WM8915_DSP1_RX_EQ_BAND_3_PG: - case WM8915_DSP1_RX_EQ_BAND_4_A: - case WM8915_DSP1_RX_EQ_BAND_4_B: - case WM8915_DSP1_RX_EQ_BAND_4_C: - case WM8915_DSP1_RX_EQ_BAND_4_PG: - case WM8915_DSP1_RX_EQ_BAND_5_A: - case WM8915_DSP1_RX_EQ_BAND_5_B: - case WM8915_DSP1_RX_EQ_BAND_5_PG: - case WM8915_DSP2_TX_LEFT_VOLUME: - case WM8915_DSP2_TX_RIGHT_VOLUME: - case WM8915_DSP2_RX_LEFT_VOLUME: - case WM8915_DSP2_RX_RIGHT_VOLUME: - case WM8915_DSP2_TX_FILTERS: - case WM8915_DSP2_RX_FILTERS_1: - case WM8915_DSP2_RX_FILTERS_2: - case WM8915_DSP2_DRC_1: - case WM8915_DSP2_DRC_2: - case WM8915_DSP2_DRC_3: - case WM8915_DSP2_DRC_4: - case WM8915_DSP2_DRC_5: - case WM8915_DSP2_RX_EQ_GAINS_1: - case WM8915_DSP2_RX_EQ_GAINS_2: - case WM8915_DSP2_RX_EQ_BAND_1_A: - case WM8915_DSP2_RX_EQ_BAND_1_B: - case WM8915_DSP2_RX_EQ_BAND_1_PG: - case WM8915_DSP2_RX_EQ_BAND_2_A: - case WM8915_DSP2_RX_EQ_BAND_2_B: - case WM8915_DSP2_RX_EQ_BAND_2_C: - case WM8915_DSP2_RX_EQ_BAND_2_PG: - case WM8915_DSP2_RX_EQ_BAND_3_A: - case WM8915_DSP2_RX_EQ_BAND_3_B: - case WM8915_DSP2_RX_EQ_BAND_3_C: - case WM8915_DSP2_RX_EQ_BAND_3_PG: - case WM8915_DSP2_RX_EQ_BAND_4_A: - case WM8915_DSP2_RX_EQ_BAND_4_B: - case WM8915_DSP2_RX_EQ_BAND_4_C: 
- case WM8915_DSP2_RX_EQ_BAND_4_PG:
- case WM8915_DSP2_RX_EQ_BAND_5_A:
- case WM8915_DSP2_RX_EQ_BAND_5_B:
- case WM8915_DSP2_RX_EQ_BAND_5_PG:
- case WM8915_DAC1_MIXER_VOLUMES:
- case WM8915_DAC1_LEFT_MIXER_ROUTING:
- case WM8915_DAC1_RIGHT_MIXER_ROUTING:
- case WM8915_DAC2_MIXER_VOLUMES:
- case WM8915_DAC2_LEFT_MIXER_ROUTING:
- case WM8915_DAC2_RIGHT_MIXER_ROUTING:
- case WM8915_DSP1_TX_LEFT_MIXER_ROUTING:
- case WM8915_DSP1_TX_RIGHT_MIXER_ROUTING:
- case WM8915_DSP2_TX_LEFT_MIXER_ROUTING:
- case WM8915_DSP2_TX_RIGHT_MIXER_ROUTING:
- case WM8915_DSP_TX_MIXER_SELECT:
- case WM8915_DAC_SOFTMUTE:
- case WM8915_OVERSAMPLING:
- case WM8915_SIDETONE:
- case WM8915_GPIO_1:
- case WM8915_GPIO_2:
- case WM8915_GPIO_3:
- case WM8915_GPIO_4:
- case WM8915_GPIO_5:
- case WM8915_PULL_CONTROL_1:
- case WM8915_PULL_CONTROL_2:
- case WM8915_INTERRUPT_STATUS_1:
- case WM8915_INTERRUPT_STATUS_2:
- case WM8915_INTERRUPT_RAW_STATUS_2:
- case WM8915_INTERRUPT_STATUS_1_MASK:
- case WM8915_INTERRUPT_STATUS_2_MASK:
- case WM8915_INTERRUPT_CONTROL:
- case WM8915_LEFT_PDM_SPEAKER:
- case WM8915_RIGHT_PDM_SPEAKER:
- case WM8915_PDM_SPEAKER_MUTE_SEQUENCE:
- case WM8915_PDM_SPEAKER_VOLUME:
- return 1;
- default:
- return 0;
- }
-}
-
-static int wm8915_volatile_register(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- switch (reg) {
- case WM8915_SOFTWARE_RESET:
- case WM8915_CHIP_REVISION:
- case WM8915_LDO_1:
- case WM8915_LDO_2:
- case WM8915_INTERRUPT_STATUS_1:
- case WM8915_INTERRUPT_STATUS_2:
- case WM8915_INTERRUPT_RAW_STATUS_2:
- case WM8915_DC_SERVO_READBACK_0:
- case WM8915_DC_SERVO_2:
- case WM8915_DC_SERVO_6:
- case WM8915_DC_SERVO_7:
- case WM8915_FLL_CONTROL_6:
- case WM8915_MIC_DETECT_3:
- case WM8915_HEADPHONE_DETECT_1:
- case WM8915_HEADPHONE_DETECT_2:
- return 1;
- default:
- return 0;
- }
-}
-
-static int wm8915_reset(struct snd_soc_codec *codec)
-{
- return snd_soc_write(codec, WM8915_SOFTWARE_RESET, 0x8915);
-}
-
-static const int bclk_divs[] = {
- 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96
-};
-
-static void wm8915_update_bclk(struct snd_soc_codec *codec)
-{
- struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec);
- int aif, best, cur_val, bclk_rate, bclk_reg, i;
-
- /* Don't bother if we're in a low frequency idle mode that
- * can't support audio.
- */
- if (wm8915->sysclk < 64000)
- return;
-
- for (aif = 0; aif < WM8915_AIFS; aif++) {
- switch (aif) {
- case 0:
- bclk_reg = WM8915_AIF1_BCLK;
- break;
- case 1:
- bclk_reg = WM8915_AIF2_BCLK;
- break;
- }
-
- bclk_rate = wm8915->bclk_rate[aif];
-
- /* Pick a divisor for BCLK as close as we can get to ideal */
- best = 0;
- for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
- cur_val = (wm8915->sysclk / bclk_divs[i]) - bclk_rate;
- if (cur_val < 0) /* BCLK table is sorted */
- break;
- best = i;
- }
- bclk_rate = wm8915->sysclk / bclk_divs[best];
- dev_dbg(codec->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n",
- bclk_divs[best], bclk_rate);
-
- snd_soc_update_bits(codec, bclk_reg,
- WM8915_AIF1_BCLK_DIV_MASK, best);
- }
-}
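The divisor search in wm8915_update_bclk() above relies on bclk_divs[] being sorted ascending: it keeps the last divisor whose resulting BCLK is still at or above the requested rate, so the chosen clock is never slower than asked for. A minimal standalone sketch of the same selection, with made-up sysclk and target values rather than anything taken from the driver:

	/* Hypothetical standalone demo of the BCLK divisor selection above. */
	#include <stdio.h>

	static const int bclk_divs[] = {
		1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96
	};

	int main(void)
	{
		int sysclk = 12288000;	/* example SYSCLK (assumed input) */
		int target = 1536000;	/* example wanted BCLK: 48kHz * 32 */
		int n = (int)(sizeof(bclk_divs) / sizeof(bclk_divs[0]));
		int best = 0;
		int i;

		for (i = 0; i < n; i++) {
			if ((sysclk / bclk_divs[i]) - target < 0)
				break;	/* table is sorted, later entries only get slower */
			best = i;
		}
		printf("BCLK_DIV index %d -> %dHz\n", best, sysclk / bclk_divs[best]);
		return 0;
	}

With these inputs the loop settles on divisor 8 (index 5), giving exactly 1536000Hz; a target that divides unevenly would instead get the nearest rate from above.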
-
-static int wm8915_set_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec);
- int ret;
-
- switch (level) {
- case SND_SOC_BIAS_ON:
- break;
-
- case SND_SOC_BIAS_PREPARE:
- if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
- snd_soc_update_bits(codec, WM8915_POWER_MANAGEMENT_1,
- WM8915_BG_ENA, WM8915_BG_ENA);
- msleep(2);
- }
- break;
-
- case SND_SOC_BIAS_STANDBY:
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8915->supplies),
- wm8915->supplies);
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to enable supplies: %d\n",
- ret);
- return ret;
- }
-
- if (wm8915->pdata.ldo_ena >= 0) {
- gpio_set_value_cansleep(wm8915->pdata.ldo_ena,
- 1);
- msleep(5);
- }
-
- codec->cache_only = false;
- snd_soc_cache_sync(codec);
- }
-
- snd_soc_update_bits(codec, WM8915_POWER_MANAGEMENT_1,
- WM8915_BG_ENA, 0);
- break;
-
- case SND_SOC_BIAS_OFF:
- codec->cache_only = true;
- if (wm8915->pdata.ldo_ena >= 0)
- gpio_set_value_cansleep(wm8915->pdata.ldo_ena, 0);
- regulator_bulk_disable(ARRAY_SIZE(wm8915->supplies),
- wm8915->supplies);
- break;
- }
-
- codec->dapm.bias_level = level;
-
- return 0;
-}
-
-static int wm8915_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
-{
- struct snd_soc_codec *codec = dai->codec;
- int aifctrl = 0;
- int bclk = 0;
- int lrclk_tx = 0;
- int lrclk_rx = 0;
- int aifctrl_reg, bclk_reg, lrclk_tx_reg, lrclk_rx_reg;
-
- switch (dai->id) {
- case 0:
- aifctrl_reg = WM8915_AIF1_CONTROL;
- bclk_reg = WM8915_AIF1_BCLK;
- lrclk_tx_reg = WM8915_AIF1_TX_LRCLK_2;
- lrclk_rx_reg = WM8915_AIF1_RX_LRCLK_2;
- break;
- case 1:
- aifctrl_reg = WM8915_AIF2_CONTROL;
- bclk_reg = WM8915_AIF2_BCLK;
- lrclk_tx_reg = WM8915_AIF2_TX_LRCLK_2;
- lrclk_rx_reg = WM8915_AIF2_RX_LRCLK_2;
- break;
- default:
- BUG();
- return -EINVAL;
- }
-
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- break;
- case SND_SOC_DAIFMT_IB_NF:
- bclk |= WM8915_AIF1_BCLK_INV;
- break;
- case SND_SOC_DAIFMT_NB_IF:
- lrclk_tx |= WM8915_AIF1TX_LRCLK_INV;
- lrclk_rx |= WM8915_AIF1RX_LRCLK_INV;
- break;
- case SND_SOC_DAIFMT_IB_IF:
- bclk |= WM8915_AIF1_BCLK_INV;
- lrclk_tx |= WM8915_AIF1TX_LRCLK_INV;
- lrclk_rx |= WM8915_AIF1RX_LRCLK_INV;
- break;
- }
-
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBS_CFS:
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- lrclk_tx |= WM8915_AIF1TX_LRCLK_MSTR;
- lrclk_rx |= WM8915_AIF1RX_LRCLK_MSTR;
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- bclk |= WM8915_AIF1_BCLK_MSTR;
- break;
- case SND_SOC_DAIFMT_CBM_CFM:
- bclk |= WM8915_AIF1_BCLK_MSTR;
- lrclk_tx |= WM8915_AIF1TX_LRCLK_MSTR;
- lrclk_rx |= WM8915_AIF1RX_LRCLK_MSTR;
- break;
- default:
- return -EINVAL;
- }
-
- switch (fmt &
SND_SOC_DAIFMT_FORMAT_MASK) { - case SND_SOC_DAIFMT_DSP_A: - break; - case SND_SOC_DAIFMT_DSP_B: - aifctrl |= 1; - break; - case SND_SOC_DAIFMT_I2S: - aifctrl |= 2; - break; - case SND_SOC_DAIFMT_LEFT_J: - aifctrl |= 3; - break; - default: - return -EINVAL; - } - - snd_soc_update_bits(codec, aifctrl_reg, WM8915_AIF1_FMT_MASK, aifctrl); - snd_soc_update_bits(codec, bclk_reg, - WM8915_AIF1_BCLK_INV | WM8915_AIF1_BCLK_MSTR, - bclk); - snd_soc_update_bits(codec, lrclk_tx_reg, - WM8915_AIF1TX_LRCLK_INV | - WM8915_AIF1TX_LRCLK_MSTR, - lrclk_tx); - snd_soc_update_bits(codec, lrclk_rx_reg, - WM8915_AIF1RX_LRCLK_INV | - WM8915_AIF1RX_LRCLK_MSTR, - lrclk_rx); - - return 0; -} - -static const int dsp_divs[] = { - 48000, 32000, 16000, 8000 -}; - -static int wm8915_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) -{ - struct snd_soc_codec *codec = dai->codec; - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - int bits, i, bclk_rate; - int aifdata = 0; - int lrclk = 0; - int dsp = 0; - int aifdata_reg, lrclk_reg, dsp_shift; - - switch (dai->id) { - case 0: - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || - (snd_soc_read(codec, WM8915_GPIO_1)) & WM8915_GP1_FN_MASK) { - aifdata_reg = WM8915_AIF1RX_DATA_CONFIGURATION; - lrclk_reg = WM8915_AIF1_RX_LRCLK_1; - } else { - aifdata_reg = WM8915_AIF1TX_DATA_CONFIGURATION_1; - lrclk_reg = WM8915_AIF1_TX_LRCLK_1; - } - dsp_shift = 0; - break; - case 1: - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || - (snd_soc_read(codec, WM8915_GPIO_2)) & WM8915_GP2_FN_MASK) { - aifdata_reg = WM8915_AIF2RX_DATA_CONFIGURATION; - lrclk_reg = WM8915_AIF2_RX_LRCLK_1; - } else { - aifdata_reg = WM8915_AIF2TX_DATA_CONFIGURATION_1; - lrclk_reg = WM8915_AIF2_TX_LRCLK_1; - } - dsp_shift = WM8915_DSP2_DIV_SHIFT; - break; - default: - BUG(); - return -EINVAL; - } - - bclk_rate = snd_soc_params_to_bclk(params); - if (bclk_rate < 0) { - dev_err(codec->dev, "Unsupported BCLK rate: %d\n", bclk_rate); - return bclk_rate; - } - - wm8915->bclk_rate[dai->id] = bclk_rate; - wm8915->rx_rate[dai->id] = params_rate(params); - - /* Needs looking at for TDM */ - bits = snd_pcm_format_width(params_format(params)); - if (bits < 0) - return bits; - aifdata |= (bits << WM8915_AIF1TX_WL_SHIFT) | bits; - - for (i = 0; i < ARRAY_SIZE(dsp_divs); i++) { - if (dsp_divs[i] == params_rate(params)) - break; - } - if (i == ARRAY_SIZE(dsp_divs)) { - dev_err(codec->dev, "Unsupported sample rate %dHz\n", - params_rate(params)); - return -EINVAL; - } - dsp |= i << dsp_shift; - - wm8915_update_bclk(codec); - - lrclk = bclk_rate / params_rate(params); - dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n", - lrclk, bclk_rate / lrclk); - - snd_soc_update_bits(codec, aifdata_reg, - WM8915_AIF1TX_WL_MASK | - WM8915_AIF1TX_SLOT_LEN_MASK, - aifdata); - snd_soc_update_bits(codec, lrclk_reg, WM8915_AIF1RX_RATE_MASK, - lrclk); - snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_2, - WM8915_DSP1_DIV_SHIFT << dsp_shift, dsp); - - return 0; -} - -static int wm8915_set_sysclk(struct snd_soc_dai *dai, - int clk_id, unsigned int freq, int dir) -{ - struct snd_soc_codec *codec = dai->codec; - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - int lfclk = 0; - int ratediv = 0; - int src; - int old; - - if (freq == wm8915->sysclk && clk_id == wm8915->sysclk_src) - return 0; - - /* Disable SYSCLK while we reconfigure */ - old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1) & WM8915_SYSCLK_ENA; - snd_soc_update_bits(codec, 
WM8915_AIF_CLOCKING_1,
- WM8915_SYSCLK_ENA, 0);
-
- switch (clk_id) {
- case WM8915_SYSCLK_MCLK1:
- wm8915->sysclk = freq;
- src = 0;
- break;
- case WM8915_SYSCLK_MCLK2:
- wm8915->sysclk = freq;
- src = 1;
- break;
- case WM8915_SYSCLK_FLL:
- wm8915->sysclk = freq;
- src = 2;
- break;
- default:
- dev_err(codec->dev, "Unsupported clock source %d\n", clk_id);
- return -EINVAL;
- }
-
- switch (wm8915->sysclk) {
- case 6144000:
- snd_soc_update_bits(codec, WM8915_AIF_RATE,
- WM8915_SYSCLK_RATE, 0);
- break;
- case 24576000:
- ratediv = WM8915_SYSCLK_DIV;
- /* Fall through: 24.576MHz is divided down and then handled
- * like 12.288MHz. */
- case 12288000:
- snd_soc_update_bits(codec, WM8915_AIF_RATE,
- WM8915_SYSCLK_RATE, WM8915_SYSCLK_RATE);
- break;
- case 32000:
- case 32768:
- lfclk = WM8915_LFCLK_ENA;
- break;
- default:
- dev_warn(codec->dev, "Unsupported clock rate %dHz\n",
- wm8915->sysclk);
- return -EINVAL;
- }
-
- wm8915_update_bclk(codec);
-
- snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1,
- WM8915_SYSCLK_SRC_MASK | WM8915_SYSCLK_DIV_MASK,
- src << WM8915_SYSCLK_SRC_SHIFT | ratediv);
- snd_soc_update_bits(codec, WM8915_CLOCKING_1, WM8915_LFCLK_ENA, lfclk);
- snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1,
- WM8915_SYSCLK_ENA, old);
-
- wm8915->sysclk_src = clk_id;
-
- return 0;
-}
-
-struct _fll_div {
- u16 fll_fratio;
- u16 fll_outdiv;
- u16 fll_refclk_div;
- u16 fll_loop_gain;
- u16 fll_ref_freq;
- u16 n;
- u16 theta;
- u16 lambda;
-};
-
-static struct {
- unsigned int min;
- unsigned int max;
- u16 fll_fratio;
- int ratio;
-} fll_fratios[] = {
- { 0, 64000, 4, 16 },
- { 64000, 128000, 3, 8 },
- { 128000, 256000, 2, 4 },
- { 256000, 1000000, 1, 2 },
- { 1000000, 13500000, 0, 1 },
-};
-
-static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
- unsigned int Fout)
-{
- unsigned int target;
- unsigned int div;
- unsigned int fratio, gcd_fll;
- int i;
-
- /* Fref must be <=13.5MHz */
- div = 1;
- fll_div->fll_refclk_div = 0;
- while ((Fref / div) > 13500000) {
- div *= 2;
- fll_div->fll_refclk_div++;
-
- if (div > 8) {
- pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
- Fref);
- return -EINVAL;
- }
- }
-
- pr_debug("FLL Fref=%u Fout=%u\n", Fref, Fout);
-
- /* Apply the division for our remaining calculations */
- Fref /= div;
-
- if (Fref >= 3000000)
- fll_div->fll_loop_gain = 5;
- else
- fll_div->fll_loop_gain = 0;
-
- if (Fref >= 48000)
- fll_div->fll_ref_freq = 0;
- else
- fll_div->fll_ref_freq = 1;
-
- /* Fvco should be 90-100MHz; don't check the upper bound */
- div = 2;
- while (Fout * div < 90000000) {
- div++;
- if (div > 64) {
- pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
- Fout);
- return -EINVAL;
- }
- }
- target = Fout * div;
- fll_div->fll_outdiv = div - 1;
-
- pr_debug("FLL Fvco=%dHz\n", target);
-
- /* Find an appropriate FLL_FRATIO and factor it out of the target */
- for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
- if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
- fll_div->fll_fratio = fll_fratios[i].fll_fratio;
- fratio = fll_fratios[i].ratio;
- break;
- }
- }
- if (i == ARRAY_SIZE(fll_fratios)) {
- pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
- return -EINVAL;
- }
-
- fll_div->n = target / (fratio * Fref);
-
- if (target % Fref == 0) {
- fll_div->theta = 0;
- fll_div->lambda = 0;
- } else {
- gcd_fll = gcd(target, fratio * Fref);
-
- fll_div->theta = (target - (fll_div->n * fratio * Fref))
- / gcd_fll;
- fll_div->lambda = (fratio * Fref) / gcd_fll;
- }
-
- pr_debug("FLL N=%x THETA=%x LAMBDA=%x\n",
- fll_div->n, fll_div->theta, fll_div->lambda);
- pr_debug("FLL_FRATIO=%x FLL_OUTDIV=%x FLL_REFCLK_DIV=%x\n",
- fll_div->fll_fratio, fll_div->fll_outdiv,
- fll_div->fll_refclk_div);
-
- return 0;
-}
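fll_factors() above splits the requested ratio Fvco/Fref into an integer part N plus a fraction THETA/LAMBDA reduced with gcd(), after first choosing FLL_OUTDIV so that Fvco = Fout * (OUTDIV + 1) lands in the 90-100MHz band. A worked sketch for one assumed input pair, a 12MHz MCLK locked to a 12.288MHz SYSCLK (illustrative numbers, not taken from the patch):

	/* Hypothetical worked example of the FLL factorisation above.
	 * FRATIO is 1 here because Fref sits in the 1-13.5MHz band. */
	#include <stdio.h>

	static unsigned int gcd_u(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;
			a = b;
			b = t;
		}
		return a;
	}

	int main(void)
	{
		unsigned int fref = 12000000, fout = 12288000;
		unsigned int div = 2, target, n, theta, lambda, g;

		while (fout * div < 90000000)	/* Fvco must reach 90MHz */
			div++;
		target = fout * div;	/* 98304000Hz, so FLL_OUTDIV = div - 1 = 7 */

		n = target / fref;	/* integer part of the ratio */
		g = gcd_u(target, fref);
		theta = (target - n * fref) / g;
		lambda = fref / g;
		printf("N=%u THETA=%u LAMBDA=%u\n", n, theta, lambda);	/* 8 24 125 */
		return 0;
	}

Here Fvco is 98.304MHz and N + THETA/LAMBDA = 8 + 24/125 = 8.192 = 98304000/12000000, so the fractional divider reproduces the ratio exactly.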
FLL_REFCLK_DIV=%x\n", - fll_div->fll_fratio, fll_div->fll_outdiv, - fll_div->fll_refclk_div); - - return 0; -} - -static int wm8915_set_fll(struct snd_soc_codec *codec, int fll_id, int source, - unsigned int Fref, unsigned int Fout) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - struct i2c_client *i2c = to_i2c_client(codec->dev); - struct _fll_div fll_div; - unsigned long timeout; - int ret, reg; - - /* Any change? */ - if (source == wm8915->fll_src && Fref == wm8915->fll_fref && - Fout == wm8915->fll_fout) - return 0; - - if (Fout == 0) { - dev_dbg(codec->dev, "FLL disabled\n"); - - wm8915->fll_fref = 0; - wm8915->fll_fout = 0; - - snd_soc_update_bits(codec, WM8915_FLL_CONTROL_1, - WM8915_FLL_ENA, 0); - - return 0; - } - - ret = fll_factors(&fll_div, Fref, Fout); - if (ret != 0) - return ret; - - switch (source) { - case WM8915_FLL_MCLK1: - reg = 0; - break; - case WM8915_FLL_MCLK2: - reg = 1; - break; - case WM8915_FLL_DACLRCLK1: - reg = 2; - break; - case WM8915_FLL_BCLK1: - reg = 3; - break; - default: - dev_err(codec->dev, "Unknown FLL source %d\n", ret); - return -EINVAL; - } - - reg |= fll_div.fll_refclk_div << WM8915_FLL_REFCLK_DIV_SHIFT; - reg |= fll_div.fll_ref_freq << WM8915_FLL_REF_FREQ_SHIFT; - - snd_soc_update_bits(codec, WM8915_FLL_CONTROL_5, - WM8915_FLL_REFCLK_DIV_MASK | WM8915_FLL_REF_FREQ | - WM8915_FLL_REFCLK_SRC_MASK, reg); - - reg = 0; - if (fll_div.theta || fll_div.lambda) - reg |= WM8915_FLL_EFS_ENA | (3 << WM8915_FLL_LFSR_SEL_SHIFT); - else - reg |= 1 << WM8915_FLL_LFSR_SEL_SHIFT; - snd_soc_write(codec, WM8915_FLL_EFS_2, reg); - - snd_soc_update_bits(codec, WM8915_FLL_CONTROL_2, - WM8915_FLL_OUTDIV_MASK | - WM8915_FLL_FRATIO_MASK, - (fll_div.fll_outdiv << WM8915_FLL_OUTDIV_SHIFT) | - (fll_div.fll_fratio)); - - snd_soc_write(codec, WM8915_FLL_CONTROL_3, fll_div.theta); - - snd_soc_update_bits(codec, WM8915_FLL_CONTROL_4, - WM8915_FLL_N_MASK | WM8915_FLL_LOOP_GAIN_MASK, - (fll_div.n << WM8915_FLL_N_SHIFT) | - fll_div.fll_loop_gain); - - snd_soc_write(codec, WM8915_FLL_EFS_1, fll_div.lambda); - - snd_soc_update_bits(codec, WM8915_FLL_CONTROL_1, - WM8915_FLL_ENA, WM8915_FLL_ENA); - - /* The FLL supports live reconfiguration - kick that in case we were - * already enabled. 
- */ - snd_soc_write(codec, WM8915_FLL_CONTROL_6, WM8915_FLL_SWITCH_CLK); - - /* Wait for the FLL to lock, using the interrupt if possible */ - if (Fref > 1000000) - timeout = usecs_to_jiffies(300); - else - timeout = msecs_to_jiffies(2); - - /* Allow substantially longer if we've actually got the IRQ */ - if (i2c->irq) - timeout *= 1000; - - ret = wait_for_completion_timeout(&wm8915->fll_lock, timeout); - - if (ret == 0 && i2c->irq) { - dev_err(codec->dev, "Timed out waiting for FLL\n"); - ret = -ETIMEDOUT; - } else { - ret = 0; - } - - dev_dbg(codec->dev, "FLL configured for %dHz->%dHz\n", Fref, Fout); - - wm8915->fll_fref = Fref; - wm8915->fll_fout = Fout; - wm8915->fll_src = source; - - return ret; -} - -#ifdef CONFIG_GPIOLIB -static inline struct wm8915_priv *gpio_to_wm8915(struct gpio_chip *chip) -{ - return container_of(chip, struct wm8915_priv, gpio_chip); -} - -static void wm8915_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct wm8915_priv *wm8915 = gpio_to_wm8915(chip); - struct snd_soc_codec *codec = wm8915->codec; - - snd_soc_update_bits(codec, WM8915_GPIO_1 + offset, - WM8915_GP1_LVL, !!value << WM8915_GP1_LVL_SHIFT); -} - -static int wm8915_gpio_direction_out(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct wm8915_priv *wm8915 = gpio_to_wm8915(chip); - struct snd_soc_codec *codec = wm8915->codec; - int val; - - val = (1 << WM8915_GP1_FN_SHIFT) | (!!value << WM8915_GP1_LVL_SHIFT); - - return snd_soc_update_bits(codec, WM8915_GPIO_1 + offset, - WM8915_GP1_FN_MASK | WM8915_GP1_DIR | - WM8915_GP1_LVL, val); -} - -static int wm8915_gpio_get(struct gpio_chip *chip, unsigned offset) -{ - struct wm8915_priv *wm8915 = gpio_to_wm8915(chip); - struct snd_soc_codec *codec = wm8915->codec; - int ret; - - ret = snd_soc_read(codec, WM8915_GPIO_1 + offset); - if (ret < 0) - return ret; - - return (ret & WM8915_GP1_LVL) != 0; -} - -static int wm8915_gpio_direction_in(struct gpio_chip *chip, unsigned offset) -{ - struct wm8915_priv *wm8915 = gpio_to_wm8915(chip); - struct snd_soc_codec *codec = wm8915->codec; - - return snd_soc_update_bits(codec, WM8915_GPIO_1 + offset, - WM8915_GP1_FN_MASK | WM8915_GP1_DIR, - (1 << WM8915_GP1_FN_SHIFT) | - (1 << WM8915_GP1_DIR_SHIFT)); -} - -static struct gpio_chip wm8915_template_chip = { - .label = "wm8915", - .owner = THIS_MODULE, - .direction_output = wm8915_gpio_direction_out, - .set = wm8915_gpio_set, - .direction_input = wm8915_gpio_direction_in, - .get = wm8915_gpio_get, - .can_sleep = 1, -}; - -static void wm8915_init_gpio(struct snd_soc_codec *codec) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - int ret; - - wm8915->gpio_chip = wm8915_template_chip; - wm8915->gpio_chip.ngpio = 5; - wm8915->gpio_chip.dev = codec->dev; - - if (wm8915->pdata.gpio_base) - wm8915->gpio_chip.base = wm8915->pdata.gpio_base; - else - wm8915->gpio_chip.base = -1; - - ret = gpiochip_add(&wm8915->gpio_chip); - if (ret != 0) - dev_err(codec->dev, "Failed to add GPIOs: %d\n", ret); -} - -static void wm8915_free_gpio(struct snd_soc_codec *codec) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - int ret; - - ret = gpiochip_remove(&wm8915->gpio_chip); - if (ret != 0) - dev_err(codec->dev, "Failed to remove GPIOs: %d\n", ret); -} -#else -static void wm8915_init_gpio(struct snd_soc_codec *codec) -{ -} - -static void wm8915_free_gpio(struct snd_soc_codec *codec) -{ -} -#endif - -/** - * wm8915_detect - Enable default WM8915 jack detection - * - * The WM8915 has advanced accessory detection support for 
headsets.
- * This function provides a default implementation which integrates
- * the majority of this functionality with minimal user configuration.
- *
- * This will detect headset, headphone and short circuit button and
- * will also detect inverted microphone ground connections and update
- * the polarity of the connections.
- */
-int wm8915_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
- wm8915_polarity_fn polarity_cb)
-{
- struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec);
-
- wm8915->jack = jack;
- wm8915->detecting = true;
- wm8915->polarity_cb = polarity_cb;
-
- if (wm8915->polarity_cb)
- wm8915->polarity_cb(codec, 0);
-
- /* Clear discharge to avoid noise during detection */
- snd_soc_update_bits(codec, WM8915_MICBIAS_1,
- WM8915_MICB1_DISCH, 0);
- snd_soc_update_bits(codec, WM8915_MICBIAS_2,
- WM8915_MICB2_DISCH, 0);
-
- /* LDO2 powers the microphones, SYSCLK clocks detection */
- snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
- snd_soc_dapm_force_enable_pin(&codec->dapm, "SYSCLK");
-
- /* We start off just enabling microphone detection - even a
- * plain headphone will trigger detection.
- */
- snd_soc_update_bits(codec, WM8915_MIC_DETECT_1,
- WM8915_MICD_ENA, WM8915_MICD_ENA);
-
- /* Slowest detection rate, gives debounce for initial detection */
- snd_soc_update_bits(codec, WM8915_MIC_DETECT_1,
- WM8915_MICD_RATE_MASK,
- WM8915_MICD_RATE_MASK);
-
- /* Enable interrupts and we're off */
- snd_soc_update_bits(codec, WM8915_INTERRUPT_STATUS_2_MASK,
- WM8915_IM_MICD_EINT, 0);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(wm8915_detect);
-
-static void wm8915_micd(struct snd_soc_codec *codec)
-{
- struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec);
- int val, reg;
-
- val = snd_soc_read(codec, WM8915_MIC_DETECT_3);
-
- dev_dbg(codec->dev, "Microphone event: %x\n", val);
-
- if (!(val & WM8915_MICD_VALID)) {
- dev_warn(codec->dev, "Microphone detection state invalid\n");
- return;
- }
-
- /* No accessory, reset everything and report removal */
- if (!(val & WM8915_MICD_STS)) {
- dev_dbg(codec->dev, "Jack removal detected\n");
- wm8915->jack_mic = false;
- wm8915->detecting = true;
- snd_soc_jack_report(wm8915->jack, 0,
- SND_JACK_HEADSET | SND_JACK_BTN_0);
- snd_soc_update_bits(codec, WM8915_MIC_DETECT_1,
- WM8915_MICD_RATE_MASK,
- WM8915_MICD_RATE_MASK);
- return;
- }
-
- /* If the measurement is very high we've got a microphone but
- * do a little debounce to account for mechanical issues.
- */
- if (val & 0x400) {
- dev_dbg(codec->dev, "Microphone detected\n");
- snd_soc_jack_report(wm8915->jack, SND_JACK_HEADSET,
- SND_JACK_HEADSET | SND_JACK_BTN_0);
- wm8915->jack_mic = true;
- wm8915->detecting = false;
-
- /* Increase poll rate to give better responsiveness
- * for buttons */
- snd_soc_update_bits(codec, WM8915_MIC_DETECT_1,
- WM8915_MICD_RATE_MASK,
- 5 << WM8915_MICD_RATE_SHIFT);
- }
-
- /* If we detected a lower impedance during initial startup
- * then we probably have the wrong polarity, flip it. Don't
- * do this for the lowest impedances to speed up detection of
- * plain headphones.
- */
- if (wm8915->detecting && (val & 0x3f0)) {
- reg = snd_soc_read(codec, WM8915_ACCESSORY_DETECT_MODE_2);
- reg ^= WM8915_HPOUT1FB_SRC | WM8915_MICD_SRC |
- WM8915_MICD_BIAS_SRC;
- snd_soc_update_bits(codec, WM8915_ACCESSORY_DETECT_MODE_2,
- WM8915_HPOUT1FB_SRC | WM8915_MICD_SRC |
- WM8915_MICD_BIAS_SRC, reg);
-
- if (wm8915->polarity_cb)
- wm8915->polarity_cb(codec,
- (reg & WM8915_MICD_SRC) != 0);
-
- dev_dbg(codec->dev, "Set microphone polarity to %d\n",
- (reg & WM8915_MICD_SRC) != 0);
-
- return;
- }
-
- /* Don't distinguish between buttons, just report any low
- * impedance as BTN_0.
- */
- if (val & 0x3fc) {
- if (wm8915->jack_mic) {
- dev_dbg(codec->dev, "Mic button detected\n");
- snd_soc_jack_report(wm8915->jack,
- SND_JACK_HEADSET | SND_JACK_BTN_0,
- SND_JACK_HEADSET | SND_JACK_BTN_0);
- } else {
- dev_dbg(codec->dev, "Headphone detected\n");
- snd_soc_jack_report(wm8915->jack,
- SND_JACK_HEADPHONE,
- SND_JACK_HEADSET |
- SND_JACK_BTN_0);
-
- /* Increase the detection rate a bit for
- * responsiveness.
- */
- snd_soc_update_bits(codec, WM8915_MIC_DETECT_1,
- WM8915_MICD_RATE_MASK,
- 7 << WM8915_MICD_RATE_SHIFT);
-
- wm8915->detecting = false;
- }
- }
-}
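wm8915_micd() above is a small decision ladder over the impedance bits read back from MIC_DETECT_3: removal first, then a very high reading as a real microphone, then a suspicious mid-range reading while still in initial detection as a polarity problem, and finally any remaining low reading as a button press or a bare headphone. A compressed, early-return rendering of that ladder (the 0x400/0x3f0/0x3fc masks mirror the literals above, but treating bit 0 as "accessory present" and the sample readings are assumptions for this demo; the real handler also falls through after the microphone branch rather than returning):

	/* Hypothetical decode of a MIC_DETECT_3-style status word. */
	#include <stdbool.h>
	#include <stdio.h>

	static const char *classify(unsigned int val, bool detecting, bool jack_mic)
	{
		if (!(val & 0x1))	/* assumed "accessory present" bit */
			return "removal";
		if (val & 0x400)	/* very high impedance: a real microphone */
			return "microphone (headset)";
		if (detecting && (val & 0x3f0))
			return "wrong polarity suspected: flip and re-measure";
		if (val & 0x3fc)	/* any low impedance */
			return jack_mic ? "button press" : "plain headphone";
		return "undecided";
	}

	int main(void)
	{
		printf("%s\n", classify(0x401, true, false));	/* microphone */
		printf("%s\n", classify(0x009, false, true));	/* button press */
		printf("%s\n", classify(0x000, false, false));	/* removal */
		return 0;
	}

The ordering matters: the polarity-flip check is only reachable while "detecting" is still set, which is exactly why the driver clears it as soon as a microphone has been confirmed.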
-
-static irqreturn_t wm8915_irq(int irq, void *data)
-{
- struct snd_soc_codec *codec = data;
- struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec);
- int irq_val;
-
- irq_val = snd_soc_read(codec, WM8915_INTERRUPT_STATUS_2);
- if (irq_val < 0) {
- dev_err(codec->dev, "Failed to read IRQ status: %d\n",
- irq_val);
- return IRQ_NONE;
- }
- irq_val &= ~snd_soc_read(codec, WM8915_INTERRUPT_STATUS_2_MASK);
-
- if (irq_val & (WM8915_DCS_DONE_01_EINT | WM8915_DCS_DONE_23_EINT)) {
- dev_dbg(codec->dev, "DC servo IRQ\n");
- complete(&wm8915->dcs_done);
- }
-
- if (irq_val & WM8915_FIFOS_ERR_EINT)
- dev_err(codec->dev, "Digital core FIFO error\n");
-
- if (irq_val & WM8915_FLL_LOCK_EINT) {
- dev_dbg(codec->dev, "FLL locked\n");
- complete(&wm8915->fll_lock);
- }
-
- if (irq_val & WM8915_MICD_EINT)
- wm8915_micd(codec);
-
- if (irq_val) {
- snd_soc_write(codec, WM8915_INTERRUPT_STATUS_2, irq_val);
-
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
-}
-
-static irqreturn_t wm8915_edge_irq(int irq, void *data)
-{
- irqreturn_t ret = IRQ_NONE;
- irqreturn_t val;
-
- do {
- val = wm8915_irq(irq, data);
- if (val != IRQ_NONE)
- ret = val;
- } while (val != IRQ_NONE);
-
- return ret;
-}
-
-static void wm8915_retune_mobile_pdata(struct snd_soc_codec *codec)
-{
- struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec);
- struct wm8915_pdata *pdata = &wm8915->pdata;
-
- struct snd_kcontrol_new controls[] = {
- SOC_ENUM_EXT("DSP1 EQ Mode",
- wm8915->retune_mobile_enum,
- wm8915_get_retune_mobile_enum,
- wm8915_put_retune_mobile_enum),
- SOC_ENUM_EXT("DSP2 EQ Mode",
- wm8915->retune_mobile_enum,
- wm8915_get_retune_mobile_enum,
- wm8915_put_retune_mobile_enum),
- };
- int ret, i, j;
- const char **t;
-
- /* We need an array of texts for the enum API but the number
- * of texts is likely to be less than the number of
- * configurations due to the sample rate dependency of the
- * configurations. */
- wm8915->num_retune_mobile_texts = 0;
- wm8915->retune_mobile_texts = NULL;
- for (i = 0; i < pdata->num_retune_mobile_cfgs; i++) {
- for (j = 0; j < wm8915->num_retune_mobile_texts; j++) {
- if (strcmp(pdata->retune_mobile_cfgs[i].name,
- wm8915->retune_mobile_texts[j]) == 0)
- break;
- }
-
- if (j != wm8915->num_retune_mobile_texts)
- continue;
-
- /* Expand the array...
*/ - t = krealloc(wm8915->retune_mobile_texts, - sizeof(char *) * - (wm8915->num_retune_mobile_texts + 1), - GFP_KERNEL); - if (t == NULL) - continue; - - /* ...store the new entry... */ - t[wm8915->num_retune_mobile_texts] = - pdata->retune_mobile_cfgs[i].name; - - /* ...and remember the new version. */ - wm8915->num_retune_mobile_texts++; - wm8915->retune_mobile_texts = t; - } - - dev_dbg(codec->dev, "Allocated %d unique ReTune Mobile names\n", - wm8915->num_retune_mobile_texts); - - wm8915->retune_mobile_enum.max = wm8915->num_retune_mobile_texts; - wm8915->retune_mobile_enum.texts = wm8915->retune_mobile_texts; - - ret = snd_soc_add_controls(codec, controls, ARRAY_SIZE(controls)); - if (ret != 0) - dev_err(codec->dev, - "Failed to add ReTune Mobile controls: %d\n", ret); -} - -static int wm8915_probe(struct snd_soc_codec *codec) -{ - int ret; - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - struct i2c_client *i2c = to_i2c_client(codec->dev); - struct snd_soc_dapm_context *dapm = &codec->dapm; - int i, irq_flags; - - wm8915->codec = codec; - - init_completion(&wm8915->dcs_done); - init_completion(&wm8915->fll_lock); - - dapm->idle_bias_off = true; - dapm->bias_level = SND_SOC_BIAS_OFF; - - ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C); - if (ret != 0) { - dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); - goto err; - } - - for (i = 0; i < ARRAY_SIZE(wm8915->supplies); i++) - wm8915->supplies[i].supply = wm8915_supply_names[i]; - - ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8915->supplies), - wm8915->supplies); - if (ret != 0) { - dev_err(codec->dev, "Failed to request supplies: %d\n", ret); - goto err; - } - - wm8915->disable_nb[0].notifier_call = wm8915_regulator_event_0; - wm8915->disable_nb[1].notifier_call = wm8915_regulator_event_1; - wm8915->disable_nb[2].notifier_call = wm8915_regulator_event_2; - wm8915->disable_nb[3].notifier_call = wm8915_regulator_event_3; - - /* This should really be moved into the regulator core */ - for (i = 0; i < ARRAY_SIZE(wm8915->supplies); i++) { - ret = regulator_register_notifier(wm8915->supplies[i].consumer, - &wm8915->disable_nb[i]); - if (ret != 0) { - dev_err(codec->dev, - "Failed to register regulator notifier: %d\n", - ret); - } - } - - ret = regulator_bulk_enable(ARRAY_SIZE(wm8915->supplies), - wm8915->supplies); - if (ret != 0) { - dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); - goto err_get; - } - - if (wm8915->pdata.ldo_ena >= 0) { - gpio_set_value_cansleep(wm8915->pdata.ldo_ena, 1); - msleep(5); - } - - ret = snd_soc_read(codec, WM8915_SOFTWARE_RESET); - if (ret < 0) { - dev_err(codec->dev, "Failed to read ID register: %d\n", ret); - goto err_enable; - } - if (ret != 0x8915) { - dev_err(codec->dev, "Device is not a WM8915, ID %x\n", ret); - ret = -EINVAL; - goto err_enable; - } - - ret = snd_soc_read(codec, WM8915_CHIP_REVISION); - if (ret < 0) { - dev_err(codec->dev, "Failed to read device revision: %d\n", - ret); - goto err_enable; - } - - dev_info(codec->dev, "revision %c\n", - (ret & WM8915_CHIP_REV_MASK) + 'A'); - - if (wm8915->pdata.ldo_ena >= 0) { - gpio_set_value_cansleep(wm8915->pdata.ldo_ena, 0); - } else { - ret = wm8915_reset(codec); - if (ret < 0) { - dev_err(codec->dev, "Failed to issue reset\n"); - goto err_enable; - } - } - - codec->cache_only = true; - - /* Apply platform data settings */ - snd_soc_update_bits(codec, WM8915_LINE_INPUT_CONTROL, - WM8915_INL_MODE_MASK | WM8915_INR_MODE_MASK, - wm8915->pdata.inl_mode << WM8915_INL_MODE_SHIFT | - 
wm8915->pdata.inr_mode); - - for (i = 0; i < ARRAY_SIZE(wm8915->pdata.gpio_default); i++) { - if (!wm8915->pdata.gpio_default[i]) - continue; - - snd_soc_write(codec, WM8915_GPIO_1 + i, - wm8915->pdata.gpio_default[i] & 0xffff); - } - - if (wm8915->pdata.spkmute_seq) - snd_soc_update_bits(codec, WM8915_PDM_SPEAKER_MUTE_SEQUENCE, - WM8915_SPK_MUTE_ENDIAN | - WM8915_SPK_MUTE_SEQ1_MASK, - wm8915->pdata.spkmute_seq); - - snd_soc_update_bits(codec, WM8915_ACCESSORY_DETECT_MODE_2, - WM8915_MICD_BIAS_SRC | WM8915_HPOUT1FB_SRC | - WM8915_MICD_SRC, wm8915->pdata.micdet_def); - - /* Latch volume update bits */ - snd_soc_update_bits(codec, WM8915_LEFT_LINE_INPUT_VOLUME, - WM8915_IN1_VU, WM8915_IN1_VU); - snd_soc_update_bits(codec, WM8915_RIGHT_LINE_INPUT_VOLUME, - WM8915_IN1_VU, WM8915_IN1_VU); - - snd_soc_update_bits(codec, WM8915_DAC1_LEFT_VOLUME, - WM8915_DAC1_VU, WM8915_DAC1_VU); - snd_soc_update_bits(codec, WM8915_DAC1_RIGHT_VOLUME, - WM8915_DAC1_VU, WM8915_DAC1_VU); - snd_soc_update_bits(codec, WM8915_DAC2_LEFT_VOLUME, - WM8915_DAC2_VU, WM8915_DAC2_VU); - snd_soc_update_bits(codec, WM8915_DAC2_RIGHT_VOLUME, - WM8915_DAC2_VU, WM8915_DAC2_VU); - - snd_soc_update_bits(codec, WM8915_OUTPUT1_LEFT_VOLUME, - WM8915_DAC1_VU, WM8915_DAC1_VU); - snd_soc_update_bits(codec, WM8915_OUTPUT1_RIGHT_VOLUME, - WM8915_DAC1_VU, WM8915_DAC1_VU); - snd_soc_update_bits(codec, WM8915_OUTPUT2_LEFT_VOLUME, - WM8915_DAC2_VU, WM8915_DAC2_VU); - snd_soc_update_bits(codec, WM8915_OUTPUT2_RIGHT_VOLUME, - WM8915_DAC2_VU, WM8915_DAC2_VU); - - snd_soc_update_bits(codec, WM8915_DSP1_TX_LEFT_VOLUME, - WM8915_DSP1TX_VU, WM8915_DSP1TX_VU); - snd_soc_update_bits(codec, WM8915_DSP1_TX_RIGHT_VOLUME, - WM8915_DSP1TX_VU, WM8915_DSP1TX_VU); - snd_soc_update_bits(codec, WM8915_DSP2_TX_LEFT_VOLUME, - WM8915_DSP2TX_VU, WM8915_DSP2TX_VU); - snd_soc_update_bits(codec, WM8915_DSP2_TX_RIGHT_VOLUME, - WM8915_DSP2TX_VU, WM8915_DSP2TX_VU); - - snd_soc_update_bits(codec, WM8915_DSP1_RX_LEFT_VOLUME, - WM8915_DSP1RX_VU, WM8915_DSP1RX_VU); - snd_soc_update_bits(codec, WM8915_DSP1_RX_RIGHT_VOLUME, - WM8915_DSP1RX_VU, WM8915_DSP1RX_VU); - snd_soc_update_bits(codec, WM8915_DSP2_RX_LEFT_VOLUME, - WM8915_DSP2RX_VU, WM8915_DSP2RX_VU); - snd_soc_update_bits(codec, WM8915_DSP2_RX_RIGHT_VOLUME, - WM8915_DSP2RX_VU, WM8915_DSP2RX_VU); - - /* No support currently for the underclocked TDM modes and - * pick a default TDM layout with each channel pair working with - * slots 0 and 1. 
*/ - snd_soc_update_bits(codec, WM8915_AIF1RX_CHANNEL_0_CONFIGURATION, - WM8915_AIF1RX_CHAN0_SLOTS_MASK | - WM8915_AIF1RX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1RX_CHAN0_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1RX_CHANNEL_1_CONFIGURATION, - WM8915_AIF1RX_CHAN1_SLOTS_MASK | - WM8915_AIF1RX_CHAN1_START_SLOT_MASK, - 1 << WM8915_AIF1RX_CHAN1_SLOTS_SHIFT | 1); - snd_soc_update_bits(codec, WM8915_AIF1RX_CHANNEL_2_CONFIGURATION, - WM8915_AIF1RX_CHAN2_SLOTS_MASK | - WM8915_AIF1RX_CHAN2_START_SLOT_MASK, - 1 << WM8915_AIF1RX_CHAN2_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1RX_CHANNEL_3_CONFIGURATION, - WM8915_AIF1RX_CHAN3_SLOTS_MASK | - WM8915_AIF1RX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1RX_CHAN3_SLOTS_SHIFT | 1); - snd_soc_update_bits(codec, WM8915_AIF1RX_CHANNEL_4_CONFIGURATION, - WM8915_AIF1RX_CHAN4_SLOTS_MASK | - WM8915_AIF1RX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1RX_CHAN4_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1RX_CHANNEL_5_CONFIGURATION, - WM8915_AIF1RX_CHAN5_SLOTS_MASK | - WM8915_AIF1RX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1RX_CHAN5_SLOTS_SHIFT | 1); - - snd_soc_update_bits(codec, WM8915_AIF2RX_CHANNEL_0_CONFIGURATION, - WM8915_AIF2RX_CHAN0_SLOTS_MASK | - WM8915_AIF2RX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF2RX_CHAN0_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF2RX_CHANNEL_1_CONFIGURATION, - WM8915_AIF2RX_CHAN1_SLOTS_MASK | - WM8915_AIF2RX_CHAN1_START_SLOT_MASK, - 1 << WM8915_AIF2RX_CHAN1_SLOTS_SHIFT | 1); - - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_0_CONFIGURATION, - WM8915_AIF1TX_CHAN0_SLOTS_MASK | - WM8915_AIF1TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN0_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_1_CONFIGURATION, - WM8915_AIF1TX_CHAN1_SLOTS_MASK | - WM8915_AIF1TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN1_SLOTS_SHIFT | 1); - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_2_CONFIGURATION, - WM8915_AIF1TX_CHAN2_SLOTS_MASK | - WM8915_AIF1TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN2_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_3_CONFIGURATION, - WM8915_AIF1TX_CHAN3_SLOTS_MASK | - WM8915_AIF1TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN3_SLOTS_SHIFT | 1); - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_4_CONFIGURATION, - WM8915_AIF1TX_CHAN4_SLOTS_MASK | - WM8915_AIF1TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN4_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_5_CONFIGURATION, - WM8915_AIF1TX_CHAN5_SLOTS_MASK | - WM8915_AIF1TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN5_SLOTS_SHIFT | 1); - - snd_soc_update_bits(codec, WM8915_AIF2TX_CHANNEL_0_CONFIGURATION, - WM8915_AIF2TX_CHAN0_SLOTS_MASK | - WM8915_AIF2TX_CHAN0_START_SLOT_MASK, - 1 << WM8915_AIF2TX_CHAN0_SLOTS_SHIFT | 0); - snd_soc_update_bits(codec, WM8915_AIF1TX_CHANNEL_1_CONFIGURATION, - WM8915_AIF2TX_CHAN1_SLOTS_MASK | - WM8915_AIF2TX_CHAN1_START_SLOT_MASK, - 1 << WM8915_AIF1TX_CHAN1_SLOTS_SHIFT | 1); - - if (wm8915->pdata.num_retune_mobile_cfgs) - wm8915_retune_mobile_pdata(codec); - else - snd_soc_add_controls(codec, wm8915_eq_controls, - ARRAY_SIZE(wm8915_eq_controls)); - - /* If the TX LRCLK pins are not in LRCLK mode configure the - * AIFs to source their clocks from the RX LRCLKs. 
- */ - if ((snd_soc_read(codec, WM8915_GPIO_1))) - snd_soc_update_bits(codec, WM8915_AIF1_TX_LRCLK_2, - WM8915_AIF1TX_LRCLK_MODE, - WM8915_AIF1TX_LRCLK_MODE); - - if ((snd_soc_read(codec, WM8915_GPIO_2))) - snd_soc_update_bits(codec, WM8915_AIF2_TX_LRCLK_2, - WM8915_AIF2TX_LRCLK_MODE, - WM8915_AIF2TX_LRCLK_MODE); - - regulator_bulk_disable(ARRAY_SIZE(wm8915->supplies), wm8915->supplies); - - wm8915_init_gpio(codec); - - if (i2c->irq) { - if (wm8915->pdata.irq_flags) - irq_flags = wm8915->pdata.irq_flags; - else - irq_flags = IRQF_TRIGGER_LOW; - - irq_flags |= IRQF_ONESHOT; - - if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) - ret = request_threaded_irq(i2c->irq, NULL, - wm8915_edge_irq, - irq_flags, "wm8915", codec); - else - ret = request_threaded_irq(i2c->irq, NULL, wm8915_irq, - irq_flags, "wm8915", codec); - - if (ret == 0) { - /* Unmask the interrupt */ - snd_soc_update_bits(codec, WM8915_INTERRUPT_CONTROL, - WM8915_IM_IRQ, 0); - - /* Enable error reporting and DC servo status */ - snd_soc_update_bits(codec, - WM8915_INTERRUPT_STATUS_2_MASK, - WM8915_IM_DCS_DONE_23_EINT | - WM8915_IM_DCS_DONE_01_EINT | - WM8915_IM_FLL_LOCK_EINT | - WM8915_IM_FIFOS_ERR_EINT, - 0); - } else { - dev_err(codec->dev, "Failed to request IRQ: %d\n", - ret); - } - } - - return 0; - -err_enable: - if (wm8915->pdata.ldo_ena >= 0) - gpio_set_value_cansleep(wm8915->pdata.ldo_ena, 0); - - regulator_bulk_disable(ARRAY_SIZE(wm8915->supplies), wm8915->supplies); -err_get: - regulator_bulk_free(ARRAY_SIZE(wm8915->supplies), wm8915->supplies); -err: - return ret; -} - -static int wm8915_remove(struct snd_soc_codec *codec) -{ - struct wm8915_priv *wm8915 = snd_soc_codec_get_drvdata(codec); - struct i2c_client *i2c = to_i2c_client(codec->dev); - int i; - - snd_soc_update_bits(codec, WM8915_INTERRUPT_CONTROL, - WM8915_IM_IRQ, WM8915_IM_IRQ); - - if (i2c->irq) - free_irq(i2c->irq, codec); - - wm8915_free_gpio(codec); - - for (i = 0; i < ARRAY_SIZE(wm8915->supplies); i++) - regulator_unregister_notifier(wm8915->supplies[i].consumer, - &wm8915->disable_nb[i]); - regulator_bulk_free(ARRAY_SIZE(wm8915->supplies), wm8915->supplies); - - return 0; -} - -static struct snd_soc_codec_driver soc_codec_dev_wm8915 = { - .probe = wm8915_probe, - .remove = wm8915_remove, - .set_bias_level = wm8915_set_bias_level, - .seq_notifier = wm8915_seq_notifier, - .reg_cache_size = WM8915_MAX_REGISTER + 1, - .reg_word_size = sizeof(u16), - .reg_cache_default = wm8915_reg, - .volatile_register = wm8915_volatile_register, - .readable_register = wm8915_readable_register, - .compress_type = SND_SOC_RBTREE_COMPRESSION, - .controls = wm8915_snd_controls, - .num_controls = ARRAY_SIZE(wm8915_snd_controls), - .dapm_widgets = wm8915_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(wm8915_dapm_widgets), - .dapm_routes = wm8915_dapm_routes, - .num_dapm_routes = ARRAY_SIZE(wm8915_dapm_routes), - .set_pll = wm8915_set_fll, -}; - -#define WM8915_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ - SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000) -#define WM8915_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE |\ - SNDRV_PCM_FMTBIT_S32_LE) - -static struct snd_soc_dai_ops wm8915_dai_ops = { - .set_fmt = wm8915_set_fmt, - .hw_params = wm8915_hw_params, - .set_sysclk = wm8915_set_sysclk, -}; - -static struct snd_soc_dai_driver wm8915_dai[] = { - { - .name = "wm8915-aif1", - .playback = { - .stream_name = "AIF1 Playback", - .channels_min = 1, - .channels_max = 6, - .rates = WM8915_RATES, - 
.formats = WM8915_FORMATS, - }, - .capture = { - .stream_name = "AIF1 Capture", - .channels_min = 1, - .channels_max = 6, - .rates = WM8915_RATES, - .formats = WM8915_FORMATS, - }, - .ops = &wm8915_dai_ops, - }, - { - .name = "wm8915-aif2", - .playback = { - .stream_name = "AIF2 Playback", - .channels_min = 1, - .channels_max = 2, - .rates = WM8915_RATES, - .formats = WM8915_FORMATS, - }, - .capture = { - .stream_name = "AIF2 Capture", - .channels_min = 1, - .channels_max = 2, - .rates = WM8915_RATES, - .formats = WM8915_FORMATS, - }, - .ops = &wm8915_dai_ops, - }, -}; - -static __devinit int wm8915_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) -{ - struct wm8915_priv *wm8915; - int ret; - - wm8915 = kzalloc(sizeof(struct wm8915_priv), GFP_KERNEL); - if (wm8915 == NULL) - return -ENOMEM; - - i2c_set_clientdata(i2c, wm8915); - - if (dev_get_platdata(&i2c->dev)) - memcpy(&wm8915->pdata, dev_get_platdata(&i2c->dev), - sizeof(wm8915->pdata)); - - if (wm8915->pdata.ldo_ena > 0) { - ret = gpio_request_one(wm8915->pdata.ldo_ena, - GPIOF_OUT_INIT_LOW, "WM8915 ENA"); - if (ret < 0) { - dev_err(&i2c->dev, "Failed to request GPIO %d: %d\n", - wm8915->pdata.ldo_ena, ret); - goto err; - } - } - - ret = snd_soc_register_codec(&i2c->dev, - &soc_codec_dev_wm8915, wm8915_dai, - ARRAY_SIZE(wm8915_dai)); - if (ret < 0) - goto err_gpio; - - return ret; - -err_gpio: - if (wm8915->pdata.ldo_ena > 0) - gpio_free(wm8915->pdata.ldo_ena); -err: - kfree(wm8915); - - return ret; -} - -static __devexit int wm8915_i2c_remove(struct i2c_client *client) -{ - struct wm8915_priv *wm8915 = i2c_get_clientdata(client); - - snd_soc_unregister_codec(&client->dev); - if (wm8915->pdata.ldo_ena > 0) - gpio_free(wm8915->pdata.ldo_ena); - kfree(i2c_get_clientdata(client)); - return 0; -} - -static const struct i2c_device_id wm8915_i2c_id[] = { - { "wm8915", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, wm8915_i2c_id); - -static struct i2c_driver wm8915_i2c_driver = { - .driver = { - .name = "wm8915", - .owner = THIS_MODULE, - }, - .probe = wm8915_i2c_probe, - .remove = __devexit_p(wm8915_i2c_remove), - .id_table = wm8915_i2c_id, -}; - -static int __init wm8915_modinit(void) -{ - int ret; - - ret = i2c_add_driver(&wm8915_i2c_driver); - if (ret != 0) { - printk(KERN_ERR "Failed to register WM8915 I2C driver: %d\n", - ret); - } - - return ret; -} -module_init(wm8915_modinit); - -static void __exit wm8915_exit(void) -{ - i2c_del_driver(&wm8915_i2c_driver); -} -module_exit(wm8915_exit); - -MODULE_DESCRIPTION("ASoC WM8915 driver"); -MODULE_AUTHOR("Mark Brown "); -MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/wm8915.h b/sound/soc/codecs/wm8915.h deleted file mode 100644 index 200ffd7..0000000 --- a/sound/soc/codecs/wm8915.h +++ /dev/null @@ -1,3717 +0,0 @@ -/* - * wm8915.h - WM8915 audio codec interface - * - * Copyright 2011 Wolfson Microelectronics PLC. - * Author: Mark Brown - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef _WM8915_H -#define _WM8915_H - -#define WM8915_SYSCLK_MCLK1 1 -#define WM8915_SYSCLK_MCLK2 2 -#define WM8915_SYSCLK_FLL 3 - -#define WM8915_FLL_MCLK1 1 -#define WM8915_FLL_MCLK2 2 -#define WM8915_FLL_DACLRCLK1 3 -#define WM8915_FLL_BCLK1 4 - -typedef void (*wm8915_polarity_fn)(struct snd_soc_codec *codec, int polarity); - -int wm8915_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, - wm8915_polarity_fn polarity_cb); - -/* - * Register values. - */ -#define WM8915_SOFTWARE_RESET 0x00 -#define WM8915_POWER_MANAGEMENT_1 0x01 -#define WM8915_POWER_MANAGEMENT_2 0x02 -#define WM8915_POWER_MANAGEMENT_3 0x03 -#define WM8915_POWER_MANAGEMENT_4 0x04 -#define WM8915_POWER_MANAGEMENT_5 0x05 -#define WM8915_POWER_MANAGEMENT_6 0x06 -#define WM8915_POWER_MANAGEMENT_7 0x07 -#define WM8915_POWER_MANAGEMENT_8 0x08 -#define WM8915_LEFT_LINE_INPUT_VOLUME 0x10 -#define WM8915_RIGHT_LINE_INPUT_VOLUME 0x11 -#define WM8915_LINE_INPUT_CONTROL 0x12 -#define WM8915_DAC1_HPOUT1_VOLUME 0x15 -#define WM8915_DAC2_HPOUT2_VOLUME 0x16 -#define WM8915_DAC1_LEFT_VOLUME 0x18 -#define WM8915_DAC1_RIGHT_VOLUME 0x19 -#define WM8915_DAC2_LEFT_VOLUME 0x1A -#define WM8915_DAC2_RIGHT_VOLUME 0x1B -#define WM8915_OUTPUT1_LEFT_VOLUME 0x1C -#define WM8915_OUTPUT1_RIGHT_VOLUME 0x1D -#define WM8915_OUTPUT2_LEFT_VOLUME 0x1E -#define WM8915_OUTPUT2_RIGHT_VOLUME 0x1F -#define WM8915_MICBIAS_1 0x20 -#define WM8915_MICBIAS_2 0x21 -#define WM8915_LDO_1 0x28 -#define WM8915_LDO_2 0x29 -#define WM8915_ACCESSORY_DETECT_MODE_1 0x30 -#define WM8915_ACCESSORY_DETECT_MODE_2 0x31 -#define WM8915_HEADPHONE_DETECT_1 0x34 -#define WM8915_HEADPHONE_DETECT_2 0x35 -#define WM8915_MIC_DETECT_1 0x38 -#define WM8915_MIC_DETECT_2 0x39 -#define WM8915_MIC_DETECT_3 0x3A -#define WM8915_CHARGE_PUMP_1 0x40 -#define WM8915_CHARGE_PUMP_2 0x41 -#define WM8915_DC_SERVO_1 0x50 -#define WM8915_DC_SERVO_2 0x51 -#define WM8915_DC_SERVO_3 0x52 -#define WM8915_DC_SERVO_5 0x54 -#define WM8915_DC_SERVO_6 0x55 -#define WM8915_DC_SERVO_7 0x56 -#define WM8915_DC_SERVO_READBACK_0 0x57 -#define WM8915_ANALOGUE_HP_1 0x60 -#define WM8915_ANALOGUE_HP_2 0x61 -#define WM8915_CHIP_REVISION 0x100 -#define WM8915_CONTROL_INTERFACE_1 0x101 -#define WM8915_WRITE_SEQUENCER_CTRL_1 0x110 -#define WM8915_WRITE_SEQUENCER_CTRL_2 0x111 -#define WM8915_AIF_CLOCKING_1 0x200 -#define WM8915_AIF_CLOCKING_2 0x201 -#define WM8915_CLOCKING_1 0x208 -#define WM8915_CLOCKING_2 0x209 -#define WM8915_AIF_RATE 0x210 -#define WM8915_FLL_CONTROL_1 0x220 -#define WM8915_FLL_CONTROL_2 0x221 -#define WM8915_FLL_CONTROL_3 0x222 -#define WM8915_FLL_CONTROL_4 0x223 -#define WM8915_FLL_CONTROL_5 0x224 -#define WM8915_FLL_CONTROL_6 0x225 -#define WM8915_FLL_EFS_1 0x226 -#define WM8915_FLL_EFS_2 0x227 -#define WM8915_AIF1_CONTROL 0x300 -#define WM8915_AIF1_BCLK 0x301 -#define WM8915_AIF1_TX_LRCLK_1 0x302 -#define WM8915_AIF1_TX_LRCLK_2 0x303 -#define WM8915_AIF1_RX_LRCLK_1 0x304 -#define WM8915_AIF1_RX_LRCLK_2 0x305 -#define WM8915_AIF1TX_DATA_CONFIGURATION_1 0x306 -#define WM8915_AIF1TX_DATA_CONFIGURATION_2 0x307 -#define WM8915_AIF1RX_DATA_CONFIGURATION 0x308 -#define WM8915_AIF1TX_CHANNEL_0_CONFIGURATION 0x309 -#define WM8915_AIF1TX_CHANNEL_1_CONFIGURATION 0x30A -#define WM8915_AIF1TX_CHANNEL_2_CONFIGURATION 0x30B -#define WM8915_AIF1TX_CHANNEL_3_CONFIGURATION 0x30C -#define WM8915_AIF1TX_CHANNEL_4_CONFIGURATION 0x30D -#define WM8915_AIF1TX_CHANNEL_5_CONFIGURATION 0x30E -#define WM8915_AIF1RX_CHANNEL_0_CONFIGURATION 0x30F -#define WM8915_AIF1RX_CHANNEL_1_CONFIGURATION 0x310 -#define 
WM8915_AIF1RX_CHANNEL_2_CONFIGURATION 0x311 -#define WM8915_AIF1RX_CHANNEL_3_CONFIGURATION 0x312 -#define WM8915_AIF1RX_CHANNEL_4_CONFIGURATION 0x313 -#define WM8915_AIF1RX_CHANNEL_5_CONFIGURATION 0x314 -#define WM8915_AIF1RX_MONO_CONFIGURATION 0x315 -#define WM8915_AIF1TX_TEST 0x31A -#define WM8915_AIF2_CONTROL 0x320 -#define WM8915_AIF2_BCLK 0x321 -#define WM8915_AIF2_TX_LRCLK_1 0x322 -#define WM8915_AIF2_TX_LRCLK_2 0x323 -#define WM8915_AIF2_RX_LRCLK_1 0x324 -#define WM8915_AIF2_RX_LRCLK_2 0x325 -#define WM8915_AIF2TX_DATA_CONFIGURATION_1 0x326 -#define WM8915_AIF2TX_DATA_CONFIGURATION_2 0x327 -#define WM8915_AIF2RX_DATA_CONFIGURATION 0x328 -#define WM8915_AIF2TX_CHANNEL_0_CONFIGURATION 0x329 -#define WM8915_AIF2TX_CHANNEL_1_CONFIGURATION 0x32A -#define WM8915_AIF2RX_CHANNEL_0_CONFIGURATION 0x32B -#define WM8915_AIF2RX_CHANNEL_1_CONFIGURATION 0x32C -#define WM8915_AIF2RX_MONO_CONFIGURATION 0x32D -#define WM8915_AIF2TX_TEST 0x32F -#define WM8915_DSP1_TX_LEFT_VOLUME 0x400 -#define WM8915_DSP1_TX_RIGHT_VOLUME 0x401 -#define WM8915_DSP1_RX_LEFT_VOLUME 0x402 -#define WM8915_DSP1_RX_RIGHT_VOLUME 0x403 -#define WM8915_DSP1_TX_FILTERS 0x410 -#define WM8915_DSP1_RX_FILTERS_1 0x420 -#define WM8915_DSP1_RX_FILTERS_2 0x421 -#define WM8915_DSP1_DRC_1 0x440 -#define WM8915_DSP1_DRC_2 0x441 -#define WM8915_DSP1_DRC_3 0x442 -#define WM8915_DSP1_DRC_4 0x443 -#define WM8915_DSP1_DRC_5 0x444 -#define WM8915_DSP1_RX_EQ_GAINS_1 0x480 -#define WM8915_DSP1_RX_EQ_GAINS_2 0x481 -#define WM8915_DSP1_RX_EQ_BAND_1_A 0x482 -#define WM8915_DSP1_RX_EQ_BAND_1_B 0x483 -#define WM8915_DSP1_RX_EQ_BAND_1_PG 0x484 -#define WM8915_DSP1_RX_EQ_BAND_2_A 0x485 -#define WM8915_DSP1_RX_EQ_BAND_2_B 0x486 -#define WM8915_DSP1_RX_EQ_BAND_2_C 0x487 -#define WM8915_DSP1_RX_EQ_BAND_2_PG 0x488 -#define WM8915_DSP1_RX_EQ_BAND_3_A 0x489 -#define WM8915_DSP1_RX_EQ_BAND_3_B 0x48A -#define WM8915_DSP1_RX_EQ_BAND_3_C 0x48B -#define WM8915_DSP1_RX_EQ_BAND_3_PG 0x48C -#define WM8915_DSP1_RX_EQ_BAND_4_A 0x48D -#define WM8915_DSP1_RX_EQ_BAND_4_B 0x48E -#define WM8915_DSP1_RX_EQ_BAND_4_C 0x48F -#define WM8915_DSP1_RX_EQ_BAND_4_PG 0x490 -#define WM8915_DSP1_RX_EQ_BAND_5_A 0x491 -#define WM8915_DSP1_RX_EQ_BAND_5_B 0x492 -#define WM8915_DSP1_RX_EQ_BAND_5_PG 0x493 -#define WM8915_DSP2_TX_LEFT_VOLUME 0x500 -#define WM8915_DSP2_TX_RIGHT_VOLUME 0x501 -#define WM8915_DSP2_RX_LEFT_VOLUME 0x502 -#define WM8915_DSP2_RX_RIGHT_VOLUME 0x503 -#define WM8915_DSP2_TX_FILTERS 0x510 -#define WM8915_DSP2_RX_FILTERS_1 0x520 -#define WM8915_DSP2_RX_FILTERS_2 0x521 -#define WM8915_DSP2_DRC_1 0x540 -#define WM8915_DSP2_DRC_2 0x541 -#define WM8915_DSP2_DRC_3 0x542 -#define WM8915_DSP2_DRC_4 0x543 -#define WM8915_DSP2_DRC_5 0x544 -#define WM8915_DSP2_RX_EQ_GAINS_1 0x580 -#define WM8915_DSP2_RX_EQ_GAINS_2 0x581 -#define WM8915_DSP2_RX_EQ_BAND_1_A 0x582 -#define WM8915_DSP2_RX_EQ_BAND_1_B 0x583 -#define WM8915_DSP2_RX_EQ_BAND_1_PG 0x584 -#define WM8915_DSP2_RX_EQ_BAND_2_A 0x585 -#define WM8915_DSP2_RX_EQ_BAND_2_B 0x586 -#define WM8915_DSP2_RX_EQ_BAND_2_C 0x587 -#define WM8915_DSP2_RX_EQ_BAND_2_PG 0x588 -#define WM8915_DSP2_RX_EQ_BAND_3_A 0x589 -#define WM8915_DSP2_RX_EQ_BAND_3_B 0x58A -#define WM8915_DSP2_RX_EQ_BAND_3_C 0x58B -#define WM8915_DSP2_RX_EQ_BAND_3_PG 0x58C -#define WM8915_DSP2_RX_EQ_BAND_4_A 0x58D -#define WM8915_DSP2_RX_EQ_BAND_4_B 0x58E -#define WM8915_DSP2_RX_EQ_BAND_4_C 0x58F -#define WM8915_DSP2_RX_EQ_BAND_4_PG 0x590 -#define WM8915_DSP2_RX_EQ_BAND_5_A 0x591 -#define WM8915_DSP2_RX_EQ_BAND_5_B 0x592 -#define WM8915_DSP2_RX_EQ_BAND_5_PG 0x593 -#define 
WM8915_DAC1_MIXER_VOLUMES 0x600 -#define WM8915_DAC1_LEFT_MIXER_ROUTING 0x601 -#define WM8915_DAC1_RIGHT_MIXER_ROUTING 0x602 -#define WM8915_DAC2_MIXER_VOLUMES 0x603 -#define WM8915_DAC2_LEFT_MIXER_ROUTING 0x604 -#define WM8915_DAC2_RIGHT_MIXER_ROUTING 0x605 -#define WM8915_DSP1_TX_LEFT_MIXER_ROUTING 0x606 -#define WM8915_DSP1_TX_RIGHT_MIXER_ROUTING 0x607 -#define WM8915_DSP2_TX_LEFT_MIXER_ROUTING 0x608 -#define WM8915_DSP2_TX_RIGHT_MIXER_ROUTING 0x609 -#define WM8915_DSP_TX_MIXER_SELECT 0x60A -#define WM8915_DAC_SOFTMUTE 0x610 -#define WM8915_OVERSAMPLING 0x620 -#define WM8915_SIDETONE 0x621 -#define WM8915_GPIO_1 0x700 -#define WM8915_GPIO_2 0x701 -#define WM8915_GPIO_3 0x702 -#define WM8915_GPIO_4 0x703 -#define WM8915_GPIO_5 0x704 -#define WM8915_PULL_CONTROL_1 0x720 -#define WM8915_PULL_CONTROL_2 0x721 -#define WM8915_INTERRUPT_STATUS_1 0x730 -#define WM8915_INTERRUPT_STATUS_2 0x731 -#define WM8915_INTERRUPT_RAW_STATUS_2 0x732 -#define WM8915_INTERRUPT_STATUS_1_MASK 0x738 -#define WM8915_INTERRUPT_STATUS_2_MASK 0x739 -#define WM8915_INTERRUPT_CONTROL 0x740 -#define WM8915_LEFT_PDM_SPEAKER 0x800 -#define WM8915_RIGHT_PDM_SPEAKER 0x801 -#define WM8915_PDM_SPEAKER_MUTE_SEQUENCE 0x802 -#define WM8915_PDM_SPEAKER_VOLUME 0x803 -#define WM8915_WRITE_SEQUENCER_0 0x3000 -#define WM8915_WRITE_SEQUENCER_1 0x3001 -#define WM8915_WRITE_SEQUENCER_2 0x3002 -#define WM8915_WRITE_SEQUENCER_3 0x3003 -#define WM8915_WRITE_SEQUENCER_4 0x3004 -#define WM8915_WRITE_SEQUENCER_5 0x3005 -#define WM8915_WRITE_SEQUENCER_6 0x3006 -#define WM8915_WRITE_SEQUENCER_7 0x3007 -#define WM8915_WRITE_SEQUENCER_8 0x3008 -#define WM8915_WRITE_SEQUENCER_9 0x3009 -#define WM8915_WRITE_SEQUENCER_10 0x300A -#define WM8915_WRITE_SEQUENCER_11 0x300B -#define WM8915_WRITE_SEQUENCER_12 0x300C -#define WM8915_WRITE_SEQUENCER_13 0x300D -#define WM8915_WRITE_SEQUENCER_14 0x300E -#define WM8915_WRITE_SEQUENCER_15 0x300F -#define WM8915_WRITE_SEQUENCER_16 0x3010 -#define WM8915_WRITE_SEQUENCER_17 0x3011 -#define WM8915_WRITE_SEQUENCER_18 0x3012 -#define WM8915_WRITE_SEQUENCER_19 0x3013 -#define WM8915_WRITE_SEQUENCER_20 0x3014 -#define WM8915_WRITE_SEQUENCER_21 0x3015 -#define WM8915_WRITE_SEQUENCER_22 0x3016 -#define WM8915_WRITE_SEQUENCER_23 0x3017 -#define WM8915_WRITE_SEQUENCER_24 0x3018 -#define WM8915_WRITE_SEQUENCER_25 0x3019 -#define WM8915_WRITE_SEQUENCER_26 0x301A -#define WM8915_WRITE_SEQUENCER_27 0x301B -#define WM8915_WRITE_SEQUENCER_28 0x301C -#define WM8915_WRITE_SEQUENCER_29 0x301D -#define WM8915_WRITE_SEQUENCER_30 0x301E -#define WM8915_WRITE_SEQUENCER_31 0x301F -#define WM8915_WRITE_SEQUENCER_32 0x3020 -#define WM8915_WRITE_SEQUENCER_33 0x3021 -#define WM8915_WRITE_SEQUENCER_34 0x3022 -#define WM8915_WRITE_SEQUENCER_35 0x3023 -#define WM8915_WRITE_SEQUENCER_36 0x3024 -#define WM8915_WRITE_SEQUENCER_37 0x3025 -#define WM8915_WRITE_SEQUENCER_38 0x3026 -#define WM8915_WRITE_SEQUENCER_39 0x3027 -#define WM8915_WRITE_SEQUENCER_40 0x3028 -#define WM8915_WRITE_SEQUENCER_41 0x3029 -#define WM8915_WRITE_SEQUENCER_42 0x302A -#define WM8915_WRITE_SEQUENCER_43 0x302B -#define WM8915_WRITE_SEQUENCER_44 0x302C -#define WM8915_WRITE_SEQUENCER_45 0x302D -#define WM8915_WRITE_SEQUENCER_46 0x302E -#define WM8915_WRITE_SEQUENCER_47 0x302F -#define WM8915_WRITE_SEQUENCER_48 0x3030 -#define WM8915_WRITE_SEQUENCER_49 0x3031 -#define WM8915_WRITE_SEQUENCER_50 0x3032 -#define WM8915_WRITE_SEQUENCER_51 0x3033 -#define WM8915_WRITE_SEQUENCER_52 0x3034 -#define WM8915_WRITE_SEQUENCER_53 0x3035 -#define WM8915_WRITE_SEQUENCER_54 0x3036 -#define 
WM8915_WRITE_SEQUENCER_55 0x3037 -#define WM8915_WRITE_SEQUENCER_56 0x3038 -#define WM8915_WRITE_SEQUENCER_57 0x3039 -#define WM8915_WRITE_SEQUENCER_58 0x303A -#define WM8915_WRITE_SEQUENCER_59 0x303B -#define WM8915_WRITE_SEQUENCER_60 0x303C -#define WM8915_WRITE_SEQUENCER_61 0x303D -#define WM8915_WRITE_SEQUENCER_62 0x303E -#define WM8915_WRITE_SEQUENCER_63 0x303F -#define WM8915_WRITE_SEQUENCER_64 0x3040 -#define WM8915_WRITE_SEQUENCER_65 0x3041 -#define WM8915_WRITE_SEQUENCER_66 0x3042 -#define WM8915_WRITE_SEQUENCER_67 0x3043 -#define WM8915_WRITE_SEQUENCER_68 0x3044 -#define WM8915_WRITE_SEQUENCER_69 0x3045 -#define WM8915_WRITE_SEQUENCER_70 0x3046 -#define WM8915_WRITE_SEQUENCER_71 0x3047 -#define WM8915_WRITE_SEQUENCER_72 0x3048 -#define WM8915_WRITE_SEQUENCER_73 0x3049 -#define WM8915_WRITE_SEQUENCER_74 0x304A -#define WM8915_WRITE_SEQUENCER_75 0x304B -#define WM8915_WRITE_SEQUENCER_76 0x304C -#define WM8915_WRITE_SEQUENCER_77 0x304D -#define WM8915_WRITE_SEQUENCER_78 0x304E -#define WM8915_WRITE_SEQUENCER_79 0x304F -#define WM8915_WRITE_SEQUENCER_80 0x3050 -#define WM8915_WRITE_SEQUENCER_81 0x3051 -#define WM8915_WRITE_SEQUENCER_82 0x3052 -#define WM8915_WRITE_SEQUENCER_83 0x3053 -#define WM8915_WRITE_SEQUENCER_84 0x3054 -#define WM8915_WRITE_SEQUENCER_85 0x3055 -#define WM8915_WRITE_SEQUENCER_86 0x3056 -#define WM8915_WRITE_SEQUENCER_87 0x3057 -#define WM8915_WRITE_SEQUENCER_88 0x3058 -#define WM8915_WRITE_SEQUENCER_89 0x3059 -#define WM8915_WRITE_SEQUENCER_90 0x305A -#define WM8915_WRITE_SEQUENCER_91 0x305B -#define WM8915_WRITE_SEQUENCER_92 0x305C -#define WM8915_WRITE_SEQUENCER_93 0x305D -#define WM8915_WRITE_SEQUENCER_94 0x305E -#define WM8915_WRITE_SEQUENCER_95 0x305F -#define WM8915_WRITE_SEQUENCER_96 0x3060 -#define WM8915_WRITE_SEQUENCER_97 0x3061 -#define WM8915_WRITE_SEQUENCER_98 0x3062 -#define WM8915_WRITE_SEQUENCER_99 0x3063 -#define WM8915_WRITE_SEQUENCER_100 0x3064 -#define WM8915_WRITE_SEQUENCER_101 0x3065 -#define WM8915_WRITE_SEQUENCER_102 0x3066 -#define WM8915_WRITE_SEQUENCER_103 0x3067 -#define WM8915_WRITE_SEQUENCER_104 0x3068 -#define WM8915_WRITE_SEQUENCER_105 0x3069 -#define WM8915_WRITE_SEQUENCER_106 0x306A -#define WM8915_WRITE_SEQUENCER_107 0x306B -#define WM8915_WRITE_SEQUENCER_108 0x306C -#define WM8915_WRITE_SEQUENCER_109 0x306D -#define WM8915_WRITE_SEQUENCER_110 0x306E -#define WM8915_WRITE_SEQUENCER_111 0x306F -#define WM8915_WRITE_SEQUENCER_112 0x3070 -#define WM8915_WRITE_SEQUENCER_113 0x3071 -#define WM8915_WRITE_SEQUENCER_114 0x3072 -#define WM8915_WRITE_SEQUENCER_115 0x3073 -#define WM8915_WRITE_SEQUENCER_116 0x3074 -#define WM8915_WRITE_SEQUENCER_117 0x3075 -#define WM8915_WRITE_SEQUENCER_118 0x3076 -#define WM8915_WRITE_SEQUENCER_119 0x3077 -#define WM8915_WRITE_SEQUENCER_120 0x3078 -#define WM8915_WRITE_SEQUENCER_121 0x3079 -#define WM8915_WRITE_SEQUENCER_122 0x307A -#define WM8915_WRITE_SEQUENCER_123 0x307B -#define WM8915_WRITE_SEQUENCER_124 0x307C -#define WM8915_WRITE_SEQUENCER_125 0x307D -#define WM8915_WRITE_SEQUENCER_126 0x307E -#define WM8915_WRITE_SEQUENCER_127 0x307F -#define WM8915_WRITE_SEQUENCER_128 0x3080 -#define WM8915_WRITE_SEQUENCER_129 0x3081 -#define WM8915_WRITE_SEQUENCER_130 0x3082 -#define WM8915_WRITE_SEQUENCER_131 0x3083 -#define WM8915_WRITE_SEQUENCER_132 0x3084 -#define WM8915_WRITE_SEQUENCER_133 0x3085 -#define WM8915_WRITE_SEQUENCER_134 0x3086 -#define WM8915_WRITE_SEQUENCER_135 0x3087 -#define WM8915_WRITE_SEQUENCER_136 0x3088 -#define WM8915_WRITE_SEQUENCER_137 0x3089 -#define WM8915_WRITE_SEQUENCER_138 
0x308A -#define WM8915_WRITE_SEQUENCER_139 0x308B -#define WM8915_WRITE_SEQUENCER_140 0x308C -#define WM8915_WRITE_SEQUENCER_141 0x308D -#define WM8915_WRITE_SEQUENCER_142 0x308E -#define WM8915_WRITE_SEQUENCER_143 0x308F -#define WM8915_WRITE_SEQUENCER_144 0x3090 -#define WM8915_WRITE_SEQUENCER_145 0x3091 -#define WM8915_WRITE_SEQUENCER_146 0x3092 -#define WM8915_WRITE_SEQUENCER_147 0x3093 -#define WM8915_WRITE_SEQUENCER_148 0x3094 -#define WM8915_WRITE_SEQUENCER_149 0x3095 -#define WM8915_WRITE_SEQUENCER_150 0x3096 -#define WM8915_WRITE_SEQUENCER_151 0x3097 -#define WM8915_WRITE_SEQUENCER_152 0x3098 -#define WM8915_WRITE_SEQUENCER_153 0x3099 -#define WM8915_WRITE_SEQUENCER_154 0x309A -#define WM8915_WRITE_SEQUENCER_155 0x309B -#define WM8915_WRITE_SEQUENCER_156 0x309C -#define WM8915_WRITE_SEQUENCER_157 0x309D -#define WM8915_WRITE_SEQUENCER_158 0x309E -#define WM8915_WRITE_SEQUENCER_159 0x309F -#define WM8915_WRITE_SEQUENCER_160 0x30A0 -#define WM8915_WRITE_SEQUENCER_161 0x30A1 -#define WM8915_WRITE_SEQUENCER_162 0x30A2 -#define WM8915_WRITE_SEQUENCER_163 0x30A3 -#define WM8915_WRITE_SEQUENCER_164 0x30A4 -#define WM8915_WRITE_SEQUENCER_165 0x30A5 -#define WM8915_WRITE_SEQUENCER_166 0x30A6 -#define WM8915_WRITE_SEQUENCER_167 0x30A7 -#define WM8915_WRITE_SEQUENCER_168 0x30A8 -#define WM8915_WRITE_SEQUENCER_169 0x30A9 -#define WM8915_WRITE_SEQUENCER_170 0x30AA -#define WM8915_WRITE_SEQUENCER_171 0x30AB -#define WM8915_WRITE_SEQUENCER_172 0x30AC -#define WM8915_WRITE_SEQUENCER_173 0x30AD -#define WM8915_WRITE_SEQUENCER_174 0x30AE -#define WM8915_WRITE_SEQUENCER_175 0x30AF -#define WM8915_WRITE_SEQUENCER_176 0x30B0 -#define WM8915_WRITE_SEQUENCER_177 0x30B1 -#define WM8915_WRITE_SEQUENCER_178 0x30B2 -#define WM8915_WRITE_SEQUENCER_179 0x30B3 -#define WM8915_WRITE_SEQUENCER_180 0x30B4 -#define WM8915_WRITE_SEQUENCER_181 0x30B5 -#define WM8915_WRITE_SEQUENCER_182 0x30B6 -#define WM8915_WRITE_SEQUENCER_183 0x30B7 -#define WM8915_WRITE_SEQUENCER_184 0x30B8 -#define WM8915_WRITE_SEQUENCER_185 0x30B9 -#define WM8915_WRITE_SEQUENCER_186 0x30BA -#define WM8915_WRITE_SEQUENCER_187 0x30BB -#define WM8915_WRITE_SEQUENCER_188 0x30BC -#define WM8915_WRITE_SEQUENCER_189 0x30BD -#define WM8915_WRITE_SEQUENCER_190 0x30BE -#define WM8915_WRITE_SEQUENCER_191 0x30BF -#define WM8915_WRITE_SEQUENCER_192 0x30C0 -#define WM8915_WRITE_SEQUENCER_193 0x30C1 -#define WM8915_WRITE_SEQUENCER_194 0x30C2 -#define WM8915_WRITE_SEQUENCER_195 0x30C3 -#define WM8915_WRITE_SEQUENCER_196 0x30C4 -#define WM8915_WRITE_SEQUENCER_197 0x30C5 -#define WM8915_WRITE_SEQUENCER_198 0x30C6 -#define WM8915_WRITE_SEQUENCER_199 0x30C7 -#define WM8915_WRITE_SEQUENCER_200 0x30C8 -#define WM8915_WRITE_SEQUENCER_201 0x30C9 -#define WM8915_WRITE_SEQUENCER_202 0x30CA -#define WM8915_WRITE_SEQUENCER_203 0x30CB -#define WM8915_WRITE_SEQUENCER_204 0x30CC -#define WM8915_WRITE_SEQUENCER_205 0x30CD -#define WM8915_WRITE_SEQUENCER_206 0x30CE -#define WM8915_WRITE_SEQUENCER_207 0x30CF -#define WM8915_WRITE_SEQUENCER_208 0x30D0 -#define WM8915_WRITE_SEQUENCER_209 0x30D1 -#define WM8915_WRITE_SEQUENCER_210 0x30D2 -#define WM8915_WRITE_SEQUENCER_211 0x30D3 -#define WM8915_WRITE_SEQUENCER_212 0x30D4 -#define WM8915_WRITE_SEQUENCER_213 0x30D5 -#define WM8915_WRITE_SEQUENCER_214 0x30D6 -#define WM8915_WRITE_SEQUENCER_215 0x30D7 -#define WM8915_WRITE_SEQUENCER_216 0x30D8 -#define WM8915_WRITE_SEQUENCER_217 0x30D9 -#define WM8915_WRITE_SEQUENCER_218 0x30DA -#define WM8915_WRITE_SEQUENCER_219 0x30DB -#define WM8915_WRITE_SEQUENCER_220 0x30DC -#define 
WM8915_WRITE_SEQUENCER_221 0x30DD -#define WM8915_WRITE_SEQUENCER_222 0x30DE -#define WM8915_WRITE_SEQUENCER_223 0x30DF -#define WM8915_WRITE_SEQUENCER_224 0x30E0 -#define WM8915_WRITE_SEQUENCER_225 0x30E1 -#define WM8915_WRITE_SEQUENCER_226 0x30E2 -#define WM8915_WRITE_SEQUENCER_227 0x30E3 -#define WM8915_WRITE_SEQUENCER_228 0x30E4 -#define WM8915_WRITE_SEQUENCER_229 0x30E5 -#define WM8915_WRITE_SEQUENCER_230 0x30E6 -#define WM8915_WRITE_SEQUENCER_231 0x30E7 -#define WM8915_WRITE_SEQUENCER_232 0x30E8 -#define WM8915_WRITE_SEQUENCER_233 0x30E9 -#define WM8915_WRITE_SEQUENCER_234 0x30EA -#define WM8915_WRITE_SEQUENCER_235 0x30EB -#define WM8915_WRITE_SEQUENCER_236 0x30EC -#define WM8915_WRITE_SEQUENCER_237 0x30ED -#define WM8915_WRITE_SEQUENCER_238 0x30EE -#define WM8915_WRITE_SEQUENCER_239 0x30EF -#define WM8915_WRITE_SEQUENCER_240 0x30F0 -#define WM8915_WRITE_SEQUENCER_241 0x30F1 -#define WM8915_WRITE_SEQUENCER_242 0x30F2 -#define WM8915_WRITE_SEQUENCER_243 0x30F3 -#define WM8915_WRITE_SEQUENCER_244 0x30F4 -#define WM8915_WRITE_SEQUENCER_245 0x30F5 -#define WM8915_WRITE_SEQUENCER_246 0x30F6 -#define WM8915_WRITE_SEQUENCER_247 0x30F7 -#define WM8915_WRITE_SEQUENCER_248 0x30F8 -#define WM8915_WRITE_SEQUENCER_249 0x30F9 -#define WM8915_WRITE_SEQUENCER_250 0x30FA -#define WM8915_WRITE_SEQUENCER_251 0x30FB -#define WM8915_WRITE_SEQUENCER_252 0x30FC -#define WM8915_WRITE_SEQUENCER_253 0x30FD -#define WM8915_WRITE_SEQUENCER_254 0x30FE -#define WM8915_WRITE_SEQUENCER_255 0x30FF -#define WM8915_WRITE_SEQUENCER_256 0x3100 -#define WM8915_WRITE_SEQUENCER_257 0x3101 -#define WM8915_WRITE_SEQUENCER_258 0x3102 -#define WM8915_WRITE_SEQUENCER_259 0x3103 -#define WM8915_WRITE_SEQUENCER_260 0x3104 -#define WM8915_WRITE_SEQUENCER_261 0x3105 -#define WM8915_WRITE_SEQUENCER_262 0x3106 -#define WM8915_WRITE_SEQUENCER_263 0x3107 -#define WM8915_WRITE_SEQUENCER_264 0x3108 -#define WM8915_WRITE_SEQUENCER_265 0x3109 -#define WM8915_WRITE_SEQUENCER_266 0x310A -#define WM8915_WRITE_SEQUENCER_267 0x310B -#define WM8915_WRITE_SEQUENCER_268 0x310C -#define WM8915_WRITE_SEQUENCER_269 0x310D -#define WM8915_WRITE_SEQUENCER_270 0x310E -#define WM8915_WRITE_SEQUENCER_271 0x310F -#define WM8915_WRITE_SEQUENCER_272 0x3110 -#define WM8915_WRITE_SEQUENCER_273 0x3111 -#define WM8915_WRITE_SEQUENCER_274 0x3112 -#define WM8915_WRITE_SEQUENCER_275 0x3113 -#define WM8915_WRITE_SEQUENCER_276 0x3114 -#define WM8915_WRITE_SEQUENCER_277 0x3115 -#define WM8915_WRITE_SEQUENCER_278 0x3116 -#define WM8915_WRITE_SEQUENCER_279 0x3117 -#define WM8915_WRITE_SEQUENCER_280 0x3118 -#define WM8915_WRITE_SEQUENCER_281 0x3119 -#define WM8915_WRITE_SEQUENCER_282 0x311A -#define WM8915_WRITE_SEQUENCER_283 0x311B -#define WM8915_WRITE_SEQUENCER_284 0x311C -#define WM8915_WRITE_SEQUENCER_285 0x311D -#define WM8915_WRITE_SEQUENCER_286 0x311E -#define WM8915_WRITE_SEQUENCER_287 0x311F -#define WM8915_WRITE_SEQUENCER_288 0x3120 -#define WM8915_WRITE_SEQUENCER_289 0x3121 -#define WM8915_WRITE_SEQUENCER_290 0x3122 -#define WM8915_WRITE_SEQUENCER_291 0x3123 -#define WM8915_WRITE_SEQUENCER_292 0x3124 -#define WM8915_WRITE_SEQUENCER_293 0x3125 -#define WM8915_WRITE_SEQUENCER_294 0x3126 -#define WM8915_WRITE_SEQUENCER_295 0x3127 -#define WM8915_WRITE_SEQUENCER_296 0x3128 -#define WM8915_WRITE_SEQUENCER_297 0x3129 -#define WM8915_WRITE_SEQUENCER_298 0x312A -#define WM8915_WRITE_SEQUENCER_299 0x312B -#define WM8915_WRITE_SEQUENCER_300 0x312C -#define WM8915_WRITE_SEQUENCER_301 0x312D -#define WM8915_WRITE_SEQUENCER_302 0x312E -#define WM8915_WRITE_SEQUENCER_303 
0x312F -#define WM8915_WRITE_SEQUENCER_304 0x3130 -#define WM8915_WRITE_SEQUENCER_305 0x3131 -#define WM8915_WRITE_SEQUENCER_306 0x3132 -#define WM8915_WRITE_SEQUENCER_307 0x3133 -#define WM8915_WRITE_SEQUENCER_308 0x3134 -#define WM8915_WRITE_SEQUENCER_309 0x3135 -#define WM8915_WRITE_SEQUENCER_310 0x3136 -#define WM8915_WRITE_SEQUENCER_311 0x3137 -#define WM8915_WRITE_SEQUENCER_312 0x3138 -#define WM8915_WRITE_SEQUENCER_313 0x3139 -#define WM8915_WRITE_SEQUENCER_314 0x313A -#define WM8915_WRITE_SEQUENCER_315 0x313B -#define WM8915_WRITE_SEQUENCER_316 0x313C -#define WM8915_WRITE_SEQUENCER_317 0x313D -#define WM8915_WRITE_SEQUENCER_318 0x313E -#define WM8915_WRITE_SEQUENCER_319 0x313F -#define WM8915_WRITE_SEQUENCER_320 0x3140 -#define WM8915_WRITE_SEQUENCER_321 0x3141 -#define WM8915_WRITE_SEQUENCER_322 0x3142 -#define WM8915_WRITE_SEQUENCER_323 0x3143 -#define WM8915_WRITE_SEQUENCER_324 0x3144 -#define WM8915_WRITE_SEQUENCER_325 0x3145 -#define WM8915_WRITE_SEQUENCER_326 0x3146 -#define WM8915_WRITE_SEQUENCER_327 0x3147 -#define WM8915_WRITE_SEQUENCER_328 0x3148 -#define WM8915_WRITE_SEQUENCER_329 0x3149 -#define WM8915_WRITE_SEQUENCER_330 0x314A -#define WM8915_WRITE_SEQUENCER_331 0x314B -#define WM8915_WRITE_SEQUENCER_332 0x314C -#define WM8915_WRITE_SEQUENCER_333 0x314D -#define WM8915_WRITE_SEQUENCER_334 0x314E -#define WM8915_WRITE_SEQUENCER_335 0x314F -#define WM8915_WRITE_SEQUENCER_336 0x3150 -#define WM8915_WRITE_SEQUENCER_337 0x3151 -#define WM8915_WRITE_SEQUENCER_338 0x3152 -#define WM8915_WRITE_SEQUENCER_339 0x3153 -#define WM8915_WRITE_SEQUENCER_340 0x3154 -#define WM8915_WRITE_SEQUENCER_341 0x3155 -#define WM8915_WRITE_SEQUENCER_342 0x3156 -#define WM8915_WRITE_SEQUENCER_343 0x3157 -#define WM8915_WRITE_SEQUENCER_344 0x3158 -#define WM8915_WRITE_SEQUENCER_345 0x3159 -#define WM8915_WRITE_SEQUENCER_346 0x315A -#define WM8915_WRITE_SEQUENCER_347 0x315B -#define WM8915_WRITE_SEQUENCER_348 0x315C -#define WM8915_WRITE_SEQUENCER_349 0x315D -#define WM8915_WRITE_SEQUENCER_350 0x315E -#define WM8915_WRITE_SEQUENCER_351 0x315F -#define WM8915_WRITE_SEQUENCER_352 0x3160 -#define WM8915_WRITE_SEQUENCER_353 0x3161 -#define WM8915_WRITE_SEQUENCER_354 0x3162 -#define WM8915_WRITE_SEQUENCER_355 0x3163 -#define WM8915_WRITE_SEQUENCER_356 0x3164 -#define WM8915_WRITE_SEQUENCER_357 0x3165 -#define WM8915_WRITE_SEQUENCER_358 0x3166 -#define WM8915_WRITE_SEQUENCER_359 0x3167 -#define WM8915_WRITE_SEQUENCER_360 0x3168 -#define WM8915_WRITE_SEQUENCER_361 0x3169 -#define WM8915_WRITE_SEQUENCER_362 0x316A -#define WM8915_WRITE_SEQUENCER_363 0x316B -#define WM8915_WRITE_SEQUENCER_364 0x316C -#define WM8915_WRITE_SEQUENCER_365 0x316D -#define WM8915_WRITE_SEQUENCER_366 0x316E -#define WM8915_WRITE_SEQUENCER_367 0x316F -#define WM8915_WRITE_SEQUENCER_368 0x3170 -#define WM8915_WRITE_SEQUENCER_369 0x3171 -#define WM8915_WRITE_SEQUENCER_370 0x3172 -#define WM8915_WRITE_SEQUENCER_371 0x3173 -#define WM8915_WRITE_SEQUENCER_372 0x3174 -#define WM8915_WRITE_SEQUENCER_373 0x3175 -#define WM8915_WRITE_SEQUENCER_374 0x3176 -#define WM8915_WRITE_SEQUENCER_375 0x3177 -#define WM8915_WRITE_SEQUENCER_376 0x3178 -#define WM8915_WRITE_SEQUENCER_377 0x3179 -#define WM8915_WRITE_SEQUENCER_378 0x317A -#define WM8915_WRITE_SEQUENCER_379 0x317B -#define WM8915_WRITE_SEQUENCER_380 0x317C -#define WM8915_WRITE_SEQUENCER_381 0x317D -#define WM8915_WRITE_SEQUENCER_382 0x317E -#define WM8915_WRITE_SEQUENCER_383 0x317F -#define WM8915_WRITE_SEQUENCER_384 0x3180 -#define WM8915_WRITE_SEQUENCER_385 0x3181 -#define 
WM8915_WRITE_SEQUENCER_386 0x3182 -#define WM8915_WRITE_SEQUENCER_387 0x3183 -#define WM8915_WRITE_SEQUENCER_388 0x3184 -#define WM8915_WRITE_SEQUENCER_389 0x3185 -#define WM8915_WRITE_SEQUENCER_390 0x3186 -#define WM8915_WRITE_SEQUENCER_391 0x3187 -#define WM8915_WRITE_SEQUENCER_392 0x3188 -#define WM8915_WRITE_SEQUENCER_393 0x3189 -#define WM8915_WRITE_SEQUENCER_394 0x318A -#define WM8915_WRITE_SEQUENCER_395 0x318B -#define WM8915_WRITE_SEQUENCER_396 0x318C -#define WM8915_WRITE_SEQUENCER_397 0x318D -#define WM8915_WRITE_SEQUENCER_398 0x318E -#define WM8915_WRITE_SEQUENCER_399 0x318F -#define WM8915_WRITE_SEQUENCER_400 0x3190 -#define WM8915_WRITE_SEQUENCER_401 0x3191 -#define WM8915_WRITE_SEQUENCER_402 0x3192 -#define WM8915_WRITE_SEQUENCER_403 0x3193 -#define WM8915_WRITE_SEQUENCER_404 0x3194 -#define WM8915_WRITE_SEQUENCER_405 0x3195 -#define WM8915_WRITE_SEQUENCER_406 0x3196 -#define WM8915_WRITE_SEQUENCER_407 0x3197 -#define WM8915_WRITE_SEQUENCER_408 0x3198 -#define WM8915_WRITE_SEQUENCER_409 0x3199 -#define WM8915_WRITE_SEQUENCER_410 0x319A -#define WM8915_WRITE_SEQUENCER_411 0x319B -#define WM8915_WRITE_SEQUENCER_412 0x319C -#define WM8915_WRITE_SEQUENCER_413 0x319D -#define WM8915_WRITE_SEQUENCER_414 0x319E -#define WM8915_WRITE_SEQUENCER_415 0x319F -#define WM8915_WRITE_SEQUENCER_416 0x31A0 -#define WM8915_WRITE_SEQUENCER_417 0x31A1 -#define WM8915_WRITE_SEQUENCER_418 0x31A2 -#define WM8915_WRITE_SEQUENCER_419 0x31A3 -#define WM8915_WRITE_SEQUENCER_420 0x31A4 -#define WM8915_WRITE_SEQUENCER_421 0x31A5 -#define WM8915_WRITE_SEQUENCER_422 0x31A6 -#define WM8915_WRITE_SEQUENCER_423 0x31A7 -#define WM8915_WRITE_SEQUENCER_424 0x31A8 -#define WM8915_WRITE_SEQUENCER_425 0x31A9 -#define WM8915_WRITE_SEQUENCER_426 0x31AA -#define WM8915_WRITE_SEQUENCER_427 0x31AB -#define WM8915_WRITE_SEQUENCER_428 0x31AC -#define WM8915_WRITE_SEQUENCER_429 0x31AD -#define WM8915_WRITE_SEQUENCER_430 0x31AE -#define WM8915_WRITE_SEQUENCER_431 0x31AF -#define WM8915_WRITE_SEQUENCER_432 0x31B0 -#define WM8915_WRITE_SEQUENCER_433 0x31B1 -#define WM8915_WRITE_SEQUENCER_434 0x31B2 -#define WM8915_WRITE_SEQUENCER_435 0x31B3 -#define WM8915_WRITE_SEQUENCER_436 0x31B4 -#define WM8915_WRITE_SEQUENCER_437 0x31B5 -#define WM8915_WRITE_SEQUENCER_438 0x31B6 -#define WM8915_WRITE_SEQUENCER_439 0x31B7 -#define WM8915_WRITE_SEQUENCER_440 0x31B8 -#define WM8915_WRITE_SEQUENCER_441 0x31B9 -#define WM8915_WRITE_SEQUENCER_442 0x31BA -#define WM8915_WRITE_SEQUENCER_443 0x31BB -#define WM8915_WRITE_SEQUENCER_444 0x31BC -#define WM8915_WRITE_SEQUENCER_445 0x31BD -#define WM8915_WRITE_SEQUENCER_446 0x31BE -#define WM8915_WRITE_SEQUENCER_447 0x31BF -#define WM8915_WRITE_SEQUENCER_448 0x31C0 -#define WM8915_WRITE_SEQUENCER_449 0x31C1 -#define WM8915_WRITE_SEQUENCER_450 0x31C2 -#define WM8915_WRITE_SEQUENCER_451 0x31C3 -#define WM8915_WRITE_SEQUENCER_452 0x31C4 -#define WM8915_WRITE_SEQUENCER_453 0x31C5 -#define WM8915_WRITE_SEQUENCER_454 0x31C6 -#define WM8915_WRITE_SEQUENCER_455 0x31C7 -#define WM8915_WRITE_SEQUENCER_456 0x31C8 -#define WM8915_WRITE_SEQUENCER_457 0x31C9 -#define WM8915_WRITE_SEQUENCER_458 0x31CA -#define WM8915_WRITE_SEQUENCER_459 0x31CB -#define WM8915_WRITE_SEQUENCER_460 0x31CC -#define WM8915_WRITE_SEQUENCER_461 0x31CD -#define WM8915_WRITE_SEQUENCER_462 0x31CE -#define WM8915_WRITE_SEQUENCER_463 0x31CF -#define WM8915_WRITE_SEQUENCER_464 0x31D0 -#define WM8915_WRITE_SEQUENCER_465 0x31D1 -#define WM8915_WRITE_SEQUENCER_466 0x31D2 -#define WM8915_WRITE_SEQUENCER_467 0x31D3 -#define WM8915_WRITE_SEQUENCER_468 
0x31D4 -#define WM8915_WRITE_SEQUENCER_469 0x31D5 -#define WM8915_WRITE_SEQUENCER_470 0x31D6 -#define WM8915_WRITE_SEQUENCER_471 0x31D7 -#define WM8915_WRITE_SEQUENCER_472 0x31D8 -#define WM8915_WRITE_SEQUENCER_473 0x31D9 -#define WM8915_WRITE_SEQUENCER_474 0x31DA -#define WM8915_WRITE_SEQUENCER_475 0x31DB -#define WM8915_WRITE_SEQUENCER_476 0x31DC -#define WM8915_WRITE_SEQUENCER_477 0x31DD -#define WM8915_WRITE_SEQUENCER_478 0x31DE -#define WM8915_WRITE_SEQUENCER_479 0x31DF -#define WM8915_WRITE_SEQUENCER_480 0x31E0 -#define WM8915_WRITE_SEQUENCER_481 0x31E1 -#define WM8915_WRITE_SEQUENCER_482 0x31E2 -#define WM8915_WRITE_SEQUENCER_483 0x31E3 -#define WM8915_WRITE_SEQUENCER_484 0x31E4 -#define WM8915_WRITE_SEQUENCER_485 0x31E5 -#define WM8915_WRITE_SEQUENCER_486 0x31E6 -#define WM8915_WRITE_SEQUENCER_487 0x31E7 -#define WM8915_WRITE_SEQUENCER_488 0x31E8 -#define WM8915_WRITE_SEQUENCER_489 0x31E9 -#define WM8915_WRITE_SEQUENCER_490 0x31EA -#define WM8915_WRITE_SEQUENCER_491 0x31EB -#define WM8915_WRITE_SEQUENCER_492 0x31EC -#define WM8915_WRITE_SEQUENCER_493 0x31ED -#define WM8915_WRITE_SEQUENCER_494 0x31EE -#define WM8915_WRITE_SEQUENCER_495 0x31EF -#define WM8915_WRITE_SEQUENCER_496 0x31F0 -#define WM8915_WRITE_SEQUENCER_497 0x31F1 -#define WM8915_WRITE_SEQUENCER_498 0x31F2 -#define WM8915_WRITE_SEQUENCER_499 0x31F3 -#define WM8915_WRITE_SEQUENCER_500 0x31F4 -#define WM8915_WRITE_SEQUENCER_501 0x31F5 -#define WM8915_WRITE_SEQUENCER_502 0x31F6 -#define WM8915_WRITE_SEQUENCER_503 0x31F7 -#define WM8915_WRITE_SEQUENCER_504 0x31F8 -#define WM8915_WRITE_SEQUENCER_505 0x31F9 -#define WM8915_WRITE_SEQUENCER_506 0x31FA -#define WM8915_WRITE_SEQUENCER_507 0x31FB -#define WM8915_WRITE_SEQUENCER_508 0x31FC -#define WM8915_WRITE_SEQUENCER_509 0x31FD -#define WM8915_WRITE_SEQUENCER_510 0x31FE -#define WM8915_WRITE_SEQUENCER_511 0x31FF - -#define WM8915_REGISTER_COUNT 706 -#define WM8915_MAX_REGISTER 0x31FF - -/* - * Field Definitions. 
- */ - -/* - * R0 (0x00) - Software Reset - */ -#define WM8915_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */ -#define WM8915_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */ -#define WM8915_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */ - -/* - * R1 (0x01) - Power Management (1) - */ -#define WM8915_MICB2_ENA 0x0200 /* MICB2_ENA */ -#define WM8915_MICB2_ENA_MASK 0x0200 /* MICB2_ENA */ -#define WM8915_MICB2_ENA_SHIFT 9 /* MICB2_ENA */ -#define WM8915_MICB2_ENA_WIDTH 1 /* MICB2_ENA */ -#define WM8915_MICB1_ENA 0x0100 /* MICB1_ENA */ -#define WM8915_MICB1_ENA_MASK 0x0100 /* MICB1_ENA */ -#define WM8915_MICB1_ENA_SHIFT 8 /* MICB1_ENA */ -#define WM8915_MICB1_ENA_WIDTH 1 /* MICB1_ENA */ -#define WM8915_HPOUT2L_ENA 0x0080 /* HPOUT2L_ENA */ -#define WM8915_HPOUT2L_ENA_MASK 0x0080 /* HPOUT2L_ENA */ -#define WM8915_HPOUT2L_ENA_SHIFT 7 /* HPOUT2L_ENA */ -#define WM8915_HPOUT2L_ENA_WIDTH 1 /* HPOUT2L_ENA */ -#define WM8915_HPOUT2R_ENA 0x0040 /* HPOUT2R_ENA */ -#define WM8915_HPOUT2R_ENA_MASK 0x0040 /* HPOUT2R_ENA */ -#define WM8915_HPOUT2R_ENA_SHIFT 6 /* HPOUT2R_ENA */ -#define WM8915_HPOUT2R_ENA_WIDTH 1 /* HPOUT2R_ENA */ -#define WM8915_HPOUT1L_ENA 0x0020 /* HPOUT1L_ENA */ -#define WM8915_HPOUT1L_ENA_MASK 0x0020 /* HPOUT1L_ENA */ -#define WM8915_HPOUT1L_ENA_SHIFT 5 /* HPOUT1L_ENA */ -#define WM8915_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */ -#define WM8915_HPOUT1R_ENA 0x0010 /* HPOUT1R_ENA */ -#define WM8915_HPOUT1R_ENA_MASK 0x0010 /* HPOUT1R_ENA */ -#define WM8915_HPOUT1R_ENA_SHIFT 4 /* HPOUT1R_ENA */ -#define WM8915_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */ -#define WM8915_BG_ENA 0x0001 /* BG_ENA */ -#define WM8915_BG_ENA_MASK 0x0001 /* BG_ENA */ -#define WM8915_BG_ENA_SHIFT 0 /* BG_ENA */ -#define WM8915_BG_ENA_WIDTH 1 /* BG_ENA */ - -/* - * R2 (0x02) - Power Management (2) - */ -#define WM8915_OPCLK_ENA 0x0800 /* OPCLK_ENA */ -#define WM8915_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */ -#define WM8915_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */ -#define WM8915_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */ -#define WM8915_INL_ENA 0x0020 /* INL_ENA */ -#define WM8915_INL_ENA_MASK 0x0020 /* INL_ENA */ -#define WM8915_INL_ENA_SHIFT 5 /* INL_ENA */ -#define WM8915_INL_ENA_WIDTH 1 /* INL_ENA */ -#define WM8915_INR_ENA 0x0010 /* INR_ENA */ -#define WM8915_INR_ENA_MASK 0x0010 /* INR_ENA */ -#define WM8915_INR_ENA_SHIFT 4 /* INR_ENA */ -#define WM8915_INR_ENA_WIDTH 1 /* INR_ENA */ -#define WM8915_LDO2_ENA 0x0002 /* LDO2_ENA */ -#define WM8915_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */ -#define WM8915_LDO2_ENA_SHIFT 1 /* LDO2_ENA */ -#define WM8915_LDO2_ENA_WIDTH 1 /* LDO2_ENA */ - -/* - * R3 (0x03) - Power Management (3) - */ -#define WM8915_DSP2RXL_ENA 0x0800 /* DSP2RXL_ENA */ -#define WM8915_DSP2RXL_ENA_MASK 0x0800 /* DSP2RXL_ENA */ -#define WM8915_DSP2RXL_ENA_SHIFT 11 /* DSP2RXL_ENA */ -#define WM8915_DSP2RXL_ENA_WIDTH 1 /* DSP2RXL_ENA */ -#define WM8915_DSP2RXR_ENA 0x0400 /* DSP2RXR_ENA */ -#define WM8915_DSP2RXR_ENA_MASK 0x0400 /* DSP2RXR_ENA */ -#define WM8915_DSP2RXR_ENA_SHIFT 10 /* DSP2RXR_ENA */ -#define WM8915_DSP2RXR_ENA_WIDTH 1 /* DSP2RXR_ENA */ -#define WM8915_DSP1RXL_ENA 0x0200 /* DSP1RXL_ENA */ -#define WM8915_DSP1RXL_ENA_MASK 0x0200 /* DSP1RXL_ENA */ -#define WM8915_DSP1RXL_ENA_SHIFT 9 /* DSP1RXL_ENA */ -#define WM8915_DSP1RXL_ENA_WIDTH 1 /* DSP1RXL_ENA */ -#define WM8915_DSP1RXR_ENA 0x0100 /* DSP1RXR_ENA */ -#define WM8915_DSP1RXR_ENA_MASK 0x0100 /* DSP1RXR_ENA */ -#define WM8915_DSP1RXR_ENA_SHIFT 8 /* DSP1RXR_ENA */ -#define WM8915_DSP1RXR_ENA_WIDTH 1 /* DSP1RXR_ENA */ -#define WM8915_DMIC2L_ENA 0x0020 /* DMIC2L_ENA */ -#define 
WM8915_DMIC2L_ENA_MASK 0x0020 /* DMIC2L_ENA */ -#define WM8915_DMIC2L_ENA_SHIFT 5 /* DMIC2L_ENA */ -#define WM8915_DMIC2L_ENA_WIDTH 1 /* DMIC2L_ENA */ -#define WM8915_DMIC2R_ENA 0x0010 /* DMIC2R_ENA */ -#define WM8915_DMIC2R_ENA_MASK 0x0010 /* DMIC2R_ENA */ -#define WM8915_DMIC2R_ENA_SHIFT 4 /* DMIC2R_ENA */ -#define WM8915_DMIC2R_ENA_WIDTH 1 /* DMIC2R_ENA */ -#define WM8915_DMIC1L_ENA 0x0008 /* DMIC1L_ENA */ -#define WM8915_DMIC1L_ENA_MASK 0x0008 /* DMIC1L_ENA */ -#define WM8915_DMIC1L_ENA_SHIFT 3 /* DMIC1L_ENA */ -#define WM8915_DMIC1L_ENA_WIDTH 1 /* DMIC1L_ENA */ -#define WM8915_DMIC1R_ENA 0x0004 /* DMIC1R_ENA */ -#define WM8915_DMIC1R_ENA_MASK 0x0004 /* DMIC1R_ENA */ -#define WM8915_DMIC1R_ENA_SHIFT 2 /* DMIC1R_ENA */ -#define WM8915_DMIC1R_ENA_WIDTH 1 /* DMIC1R_ENA */ -#define WM8915_ADCL_ENA 0x0002 /* ADCL_ENA */ -#define WM8915_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */ -#define WM8915_ADCL_ENA_SHIFT 1 /* ADCL_ENA */ -#define WM8915_ADCL_ENA_WIDTH 1 /* ADCL_ENA */ -#define WM8915_ADCR_ENA 0x0001 /* ADCR_ENA */ -#define WM8915_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */ -#define WM8915_ADCR_ENA_SHIFT 0 /* ADCR_ENA */ -#define WM8915_ADCR_ENA_WIDTH 1 /* ADCR_ENA */ - -/* - * R4 (0x04) - Power Management (4) - */ -#define WM8915_AIF2RX_CHAN1_ENA 0x0200 /* AIF2RX_CHAN1_ENA */ -#define WM8915_AIF2RX_CHAN1_ENA_MASK 0x0200 /* AIF2RX_CHAN1_ENA */ -#define WM8915_AIF2RX_CHAN1_ENA_SHIFT 9 /* AIF2RX_CHAN1_ENA */ -#define WM8915_AIF2RX_CHAN1_ENA_WIDTH 1 /* AIF2RX_CHAN1_ENA */ -#define WM8915_AIF2RX_CHAN0_ENA 0x0100 /* AIF2RX_CHAN0_ENA */ -#define WM8915_AIF2RX_CHAN0_ENA_MASK 0x0100 /* AIF2RX_CHAN0_ENA */ -#define WM8915_AIF2RX_CHAN0_ENA_SHIFT 8 /* AIF2RX_CHAN0_ENA */ -#define WM8915_AIF2RX_CHAN0_ENA_WIDTH 1 /* AIF2RX_CHAN0_ENA */ -#define WM8915_AIF1RX_CHAN5_ENA 0x0020 /* AIF1RX_CHAN5_ENA */ -#define WM8915_AIF1RX_CHAN5_ENA_MASK 0x0020 /* AIF1RX_CHAN5_ENA */ -#define WM8915_AIF1RX_CHAN5_ENA_SHIFT 5 /* AIF1RX_CHAN5_ENA */ -#define WM8915_AIF1RX_CHAN5_ENA_WIDTH 1 /* AIF1RX_CHAN5_ENA */ -#define WM8915_AIF1RX_CHAN4_ENA 0x0010 /* AIF1RX_CHAN4_ENA */ -#define WM8915_AIF1RX_CHAN4_ENA_MASK 0x0010 /* AIF1RX_CHAN4_ENA */ -#define WM8915_AIF1RX_CHAN4_ENA_SHIFT 4 /* AIF1RX_CHAN4_ENA */ -#define WM8915_AIF1RX_CHAN4_ENA_WIDTH 1 /* AIF1RX_CHAN4_ENA */ -#define WM8915_AIF1RX_CHAN3_ENA 0x0008 /* AIF1RX_CHAN3_ENA */ -#define WM8915_AIF1RX_CHAN3_ENA_MASK 0x0008 /* AIF1RX_CHAN3_ENA */ -#define WM8915_AIF1RX_CHAN3_ENA_SHIFT 3 /* AIF1RX_CHAN3_ENA */ -#define WM8915_AIF1RX_CHAN3_ENA_WIDTH 1 /* AIF1RX_CHAN3_ENA */ -#define WM8915_AIF1RX_CHAN2_ENA 0x0004 /* AIF1RX_CHAN2_ENA */ -#define WM8915_AIF1RX_CHAN2_ENA_MASK 0x0004 /* AIF1RX_CHAN2_ENA */ -#define WM8915_AIF1RX_CHAN2_ENA_SHIFT 2 /* AIF1RX_CHAN2_ENA */ -#define WM8915_AIF1RX_CHAN2_ENA_WIDTH 1 /* AIF1RX_CHAN2_ENA */ -#define WM8915_AIF1RX_CHAN1_ENA 0x0002 /* AIF1RX_CHAN1_ENA */ -#define WM8915_AIF1RX_CHAN1_ENA_MASK 0x0002 /* AIF1RX_CHAN1_ENA */ -#define WM8915_AIF1RX_CHAN1_ENA_SHIFT 1 /* AIF1RX_CHAN1_ENA */ -#define WM8915_AIF1RX_CHAN1_ENA_WIDTH 1 /* AIF1RX_CHAN1_ENA */ -#define WM8915_AIF1RX_CHAN0_ENA 0x0001 /* AIF1RX_CHAN0_ENA */ -#define WM8915_AIF1RX_CHAN0_ENA_MASK 0x0001 /* AIF1RX_CHAN0_ENA */ -#define WM8915_AIF1RX_CHAN0_ENA_SHIFT 0 /* AIF1RX_CHAN0_ENA */ -#define WM8915_AIF1RX_CHAN0_ENA_WIDTH 1 /* AIF1RX_CHAN0_ENA */ - -/* - * R5 (0x05) - Power Management (5) - */ -#define WM8915_DSP2TXL_ENA 0x0800 /* DSP2TXL_ENA */ -#define WM8915_DSP2TXL_ENA_MASK 0x0800 /* DSP2TXL_ENA */ -#define WM8915_DSP2TXL_ENA_SHIFT 11 /* DSP2TXL_ENA */ -#define WM8915_DSP2TXL_ENA_WIDTH 1 /* 
DSP2TXL_ENA */ -#define WM8915_DSP2TXR_ENA 0x0400 /* DSP2TXR_ENA */ -#define WM8915_DSP2TXR_ENA_MASK 0x0400 /* DSP2TXR_ENA */ -#define WM8915_DSP2TXR_ENA_SHIFT 10 /* DSP2TXR_ENA */ -#define WM8915_DSP2TXR_ENA_WIDTH 1 /* DSP2TXR_ENA */ -#define WM8915_DSP1TXL_ENA 0x0200 /* DSP1TXL_ENA */ -#define WM8915_DSP1TXL_ENA_MASK 0x0200 /* DSP1TXL_ENA */ -#define WM8915_DSP1TXL_ENA_SHIFT 9 /* DSP1TXL_ENA */ -#define WM8915_DSP1TXL_ENA_WIDTH 1 /* DSP1TXL_ENA */ -#define WM8915_DSP1TXR_ENA 0x0100 /* DSP1TXR_ENA */ -#define WM8915_DSP1TXR_ENA_MASK 0x0100 /* DSP1TXR_ENA */ -#define WM8915_DSP1TXR_ENA_SHIFT 8 /* DSP1TXR_ENA */ -#define WM8915_DSP1TXR_ENA_WIDTH 1 /* DSP1TXR_ENA */ -#define WM8915_DAC2L_ENA 0x0008 /* DAC2L_ENA */ -#define WM8915_DAC2L_ENA_MASK 0x0008 /* DAC2L_ENA */ -#define WM8915_DAC2L_ENA_SHIFT 3 /* DAC2L_ENA */ -#define WM8915_DAC2L_ENA_WIDTH 1 /* DAC2L_ENA */ -#define WM8915_DAC2R_ENA 0x0004 /* DAC2R_ENA */ -#define WM8915_DAC2R_ENA_MASK 0x0004 /* DAC2R_ENA */ -#define WM8915_DAC2R_ENA_SHIFT 2 /* DAC2R_ENA */ -#define WM8915_DAC2R_ENA_WIDTH 1 /* DAC2R_ENA */ -#define WM8915_DAC1L_ENA 0x0002 /* DAC1L_ENA */ -#define WM8915_DAC1L_ENA_MASK 0x0002 /* DAC1L_ENA */ -#define WM8915_DAC1L_ENA_SHIFT 1 /* DAC1L_ENA */ -#define WM8915_DAC1L_ENA_WIDTH 1 /* DAC1L_ENA */ -#define WM8915_DAC1R_ENA 0x0001 /* DAC1R_ENA */ -#define WM8915_DAC1R_ENA_MASK 0x0001 /* DAC1R_ENA */ -#define WM8915_DAC1R_ENA_SHIFT 0 /* DAC1R_ENA */ -#define WM8915_DAC1R_ENA_WIDTH 1 /* DAC1R_ENA */ - -/* - * R6 (0x06) - Power Management (6) - */ -#define WM8915_AIF2TX_CHAN1_ENA 0x0200 /* AIF2TX_CHAN1_ENA */ -#define WM8915_AIF2TX_CHAN1_ENA_MASK 0x0200 /* AIF2TX_CHAN1_ENA */ -#define WM8915_AIF2TX_CHAN1_ENA_SHIFT 9 /* AIF2TX_CHAN1_ENA */ -#define WM8915_AIF2TX_CHAN1_ENA_WIDTH 1 /* AIF2TX_CHAN1_ENA */ -#define WM8915_AIF2TX_CHAN0_ENA 0x0100 /* AIF2TX_CHAN0_ENA */ -#define WM8915_AIF2TX_CHAN0_ENA_MASK 0x0100 /* AIF2TX_CHAN0_ENA */ -#define WM8915_AIF2TX_CHAN0_ENA_SHIFT 8 /* AIF2TX_CHAN0_ENA */ -#define WM8915_AIF2TX_CHAN0_ENA_WIDTH 1 /* AIF2TX_CHAN0_ENA */ -#define WM8915_AIF1TX_CHAN5_ENA 0x0020 /* AIF1TX_CHAN5_ENA */ -#define WM8915_AIF1TX_CHAN5_ENA_MASK 0x0020 /* AIF1TX_CHAN5_ENA */ -#define WM8915_AIF1TX_CHAN5_ENA_SHIFT 5 /* AIF1TX_CHAN5_ENA */ -#define WM8915_AIF1TX_CHAN5_ENA_WIDTH 1 /* AIF1TX_CHAN5_ENA */ -#define WM8915_AIF1TX_CHAN4_ENA 0x0010 /* AIF1TX_CHAN4_ENA */ -#define WM8915_AIF1TX_CHAN4_ENA_MASK 0x0010 /* AIF1TX_CHAN4_ENA */ -#define WM8915_AIF1TX_CHAN4_ENA_SHIFT 4 /* AIF1TX_CHAN4_ENA */ -#define WM8915_AIF1TX_CHAN4_ENA_WIDTH 1 /* AIF1TX_CHAN4_ENA */ -#define WM8915_AIF1TX_CHAN3_ENA 0x0008 /* AIF1TX_CHAN3_ENA */ -#define WM8915_AIF1TX_CHAN3_ENA_MASK 0x0008 /* AIF1TX_CHAN3_ENA */ -#define WM8915_AIF1TX_CHAN3_ENA_SHIFT 3 /* AIF1TX_CHAN3_ENA */ -#define WM8915_AIF1TX_CHAN3_ENA_WIDTH 1 /* AIF1TX_CHAN3_ENA */ -#define WM8915_AIF1TX_CHAN2_ENA 0x0004 /* AIF1TX_CHAN2_ENA */ -#define WM8915_AIF1TX_CHAN2_ENA_MASK 0x0004 /* AIF1TX_CHAN2_ENA */ -#define WM8915_AIF1TX_CHAN2_ENA_SHIFT 2 /* AIF1TX_CHAN2_ENA */ -#define WM8915_AIF1TX_CHAN2_ENA_WIDTH 1 /* AIF1TX_CHAN2_ENA */ -#define WM8915_AIF1TX_CHAN1_ENA 0x0002 /* AIF1TX_CHAN1_ENA */ -#define WM8915_AIF1TX_CHAN1_ENA_MASK 0x0002 /* AIF1TX_CHAN1_ENA */ -#define WM8915_AIF1TX_CHAN1_ENA_SHIFT 1 /* AIF1TX_CHAN1_ENA */ -#define WM8915_AIF1TX_CHAN1_ENA_WIDTH 1 /* AIF1TX_CHAN1_ENA */ -#define WM8915_AIF1TX_CHAN0_ENA 0x0001 /* AIF1TX_CHAN0_ENA */ -#define WM8915_AIF1TX_CHAN0_ENA_MASK 0x0001 /* AIF1TX_CHAN0_ENA */ -#define WM8915_AIF1TX_CHAN0_ENA_SHIFT 0 /* AIF1TX_CHAN0_ENA */ -#define 
WM8915_AIF1TX_CHAN0_ENA_WIDTH 1 /* AIF1TX_CHAN0_ENA */ - -/* - * R7 (0x07) - Power Management (7) - */ -#define WM8915_DMIC2_FN 0x0200 /* DMIC2_FN */ -#define WM8915_DMIC2_FN_MASK 0x0200 /* DMIC2_FN */ -#define WM8915_DMIC2_FN_SHIFT 9 /* DMIC2_FN */ -#define WM8915_DMIC2_FN_WIDTH 1 /* DMIC2_FN */ -#define WM8915_DMIC1_FN 0x0100 /* DMIC1_FN */ -#define WM8915_DMIC1_FN_MASK 0x0100 /* DMIC1_FN */ -#define WM8915_DMIC1_FN_SHIFT 8 /* DMIC1_FN */ -#define WM8915_DMIC1_FN_WIDTH 1 /* DMIC1_FN */ -#define WM8915_ADC_DMIC_DSP2R_ENA 0x0080 /* ADC_DMIC_DSP2R_ENA */ -#define WM8915_ADC_DMIC_DSP2R_ENA_MASK 0x0080 /* ADC_DMIC_DSP2R_ENA */ -#define WM8915_ADC_DMIC_DSP2R_ENA_SHIFT 7 /* ADC_DMIC_DSP2R_ENA */ -#define WM8915_ADC_DMIC_DSP2R_ENA_WIDTH 1 /* ADC_DMIC_DSP2R_ENA */ -#define WM8915_ADC_DMIC_DSP2L_ENA 0x0040 /* ADC_DMIC_DSP2L_ENA */ -#define WM8915_ADC_DMIC_DSP2L_ENA_MASK 0x0040 /* ADC_DMIC_DSP2L_ENA */ -#define WM8915_ADC_DMIC_DSP2L_ENA_SHIFT 6 /* ADC_DMIC_DSP2L_ENA */ -#define WM8915_ADC_DMIC_DSP2L_ENA_WIDTH 1 /* ADC_DMIC_DSP2L_ENA */ -#define WM8915_ADC_DMIC_SRC2_MASK 0x0030 /* ADC_DMIC_SRC2 - [5:4] */ -#define WM8915_ADC_DMIC_SRC2_SHIFT 4 /* ADC_DMIC_SRC2 - [5:4] */ -#define WM8915_ADC_DMIC_SRC2_WIDTH 2 /* ADC_DMIC_SRC2 - [5:4] */ -#define WM8915_ADC_DMIC_DSP1R_ENA 0x0008 /* ADC_DMIC_DSP1R_ENA */ -#define WM8915_ADC_DMIC_DSP1R_ENA_MASK 0x0008 /* ADC_DMIC_DSP1R_ENA */ -#define WM8915_ADC_DMIC_DSP1R_ENA_SHIFT 3 /* ADC_DMIC_DSP1R_ENA */ -#define WM8915_ADC_DMIC_DSP1R_ENA_WIDTH 1 /* ADC_DMIC_DSP1R_ENA */ -#define WM8915_ADC_DMIC_DSP1L_ENA 0x0004 /* ADC_DMIC_DSP1L_ENA */ -#define WM8915_ADC_DMIC_DSP1L_ENA_MASK 0x0004 /* ADC_DMIC_DSP1L_ENA */ -#define WM8915_ADC_DMIC_DSP1L_ENA_SHIFT 2 /* ADC_DMIC_DSP1L_ENA */ -#define WM8915_ADC_DMIC_DSP1L_ENA_WIDTH 1 /* ADC_DMIC_DSP1L_ENA */ -#define WM8915_ADC_DMIC_SRC1_MASK 0x0003 /* ADC_DMIC_SRC1 - [1:0] */ -#define WM8915_ADC_DMIC_SRC1_SHIFT 0 /* ADC_DMIC_SRC1 - [1:0] */ -#define WM8915_ADC_DMIC_SRC1_WIDTH 2 /* ADC_DMIC_SRC1 - [1:0] */ - -/* - * R8 (0x08) - Power Management (8) - */ -#define WM8915_AIF2TX_SRC_MASK 0x00C0 /* AIF2TX_SRC - [7:6] */ -#define WM8915_AIF2TX_SRC_SHIFT 6 /* AIF2TX_SRC - [7:6] */ -#define WM8915_AIF2TX_SRC_WIDTH 2 /* AIF2TX_SRC - [7:6] */ -#define WM8915_DSP2RX_SRC 0x0010 /* DSP2RX_SRC */ -#define WM8915_DSP2RX_SRC_MASK 0x0010 /* DSP2RX_SRC */ -#define WM8915_DSP2RX_SRC_SHIFT 4 /* DSP2RX_SRC */ -#define WM8915_DSP2RX_SRC_WIDTH 1 /* DSP2RX_SRC */ -#define WM8915_DSP1RX_SRC 0x0001 /* DSP1RX_SRC */ -#define WM8915_DSP1RX_SRC_MASK 0x0001 /* DSP1RX_SRC */ -#define WM8915_DSP1RX_SRC_SHIFT 0 /* DSP1RX_SRC */ -#define WM8915_DSP1RX_SRC_WIDTH 1 /* DSP1RX_SRC */ - -/* - * R16 (0x10) - Left Line Input Volume - */ -#define WM8915_IN1_VU 0x0080 /* IN1_VU */ -#define WM8915_IN1_VU_MASK 0x0080 /* IN1_VU */ -#define WM8915_IN1_VU_SHIFT 7 /* IN1_VU */ -#define WM8915_IN1_VU_WIDTH 1 /* IN1_VU */ -#define WM8915_IN1L_ZC 0x0020 /* IN1L_ZC */ -#define WM8915_IN1L_ZC_MASK 0x0020 /* IN1L_ZC */ -#define WM8915_IN1L_ZC_SHIFT 5 /* IN1L_ZC */ -#define WM8915_IN1L_ZC_WIDTH 1 /* IN1L_ZC */ -#define WM8915_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */ -#define WM8915_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */ -#define WM8915_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */ - -/* - * R17 (0x11) - Right Line Input Volume - */ -#define WM8915_IN1_VU 0x0080 /* IN1_VU */ -#define WM8915_IN1_VU_MASK 0x0080 /* IN1_VU */ -#define WM8915_IN1_VU_SHIFT 7 /* IN1_VU */ -#define WM8915_IN1_VU_WIDTH 1 /* IN1_VU */ -#define WM8915_IN1R_ZC 0x0020 /* IN1R_ZC */ -#define WM8915_IN1R_ZC_MASK 0x0020 /* 
IN1R_ZC */ -#define WM8915_IN1R_ZC_SHIFT 5 /* IN1R_ZC */ -#define WM8915_IN1R_ZC_WIDTH 1 /* IN1R_ZC */ -#define WM8915_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */ -#define WM8915_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */ -#define WM8915_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */ - -/* - * R18 (0x12) - Line Input Control - */ -#define WM8915_INL_MODE_MASK 0x000C /* INL_MODE - [3:2] */ -#define WM8915_INL_MODE_SHIFT 2 /* INL_MODE - [3:2] */ -#define WM8915_INL_MODE_WIDTH 2 /* INL_MODE - [3:2] */ -#define WM8915_INR_MODE_MASK 0x0003 /* INR_MODE - [1:0] */ -#define WM8915_INR_MODE_SHIFT 0 /* INR_MODE - [1:0] */ -#define WM8915_INR_MODE_WIDTH 2 /* INR_MODE - [1:0] */ - -/* - * R21 (0x15) - DAC1 HPOUT1 Volume - */ -#define WM8915_DAC1R_HPOUT1R_VOL_MASK 0x00F0 /* DAC1R_HPOUT1R_VOL - [7:4] */ -#define WM8915_DAC1R_HPOUT1R_VOL_SHIFT 4 /* DAC1R_HPOUT1R_VOL - [7:4] */ -#define WM8915_DAC1R_HPOUT1R_VOL_WIDTH 4 /* DAC1R_HPOUT1R_VOL - [7:4] */ -#define WM8915_DAC1L_HPOUT1L_VOL_MASK 0x000F /* DAC1L_HPOUT1L_VOL - [3:0] */ -#define WM8915_DAC1L_HPOUT1L_VOL_SHIFT 0 /* DAC1L_HPOUT1L_VOL - [3:0] */ -#define WM8915_DAC1L_HPOUT1L_VOL_WIDTH 4 /* DAC1L_HPOUT1L_VOL - [3:0] */ - -/* - * R22 (0x16) - DAC2 HPOUT2 Volume - */ -#define WM8915_DAC2R_HPOUT2R_VOL_MASK 0x00F0 /* DAC2R_HPOUT2R_VOL - [7:4] */ -#define WM8915_DAC2R_HPOUT2R_VOL_SHIFT 4 /* DAC2R_HPOUT2R_VOL - [7:4] */ -#define WM8915_DAC2R_HPOUT2R_VOL_WIDTH 4 /* DAC2R_HPOUT2R_VOL - [7:4] */ -#define WM8915_DAC2L_HPOUT2L_VOL_MASK 0x000F /* DAC2L_HPOUT2L_VOL - [3:0] */ -#define WM8915_DAC2L_HPOUT2L_VOL_SHIFT 0 /* DAC2L_HPOUT2L_VOL - [3:0] */ -#define WM8915_DAC2L_HPOUT2L_VOL_WIDTH 4 /* DAC2L_HPOUT2L_VOL - [3:0] */ - -/* - * R24 (0x18) - DAC1 Left Volume - */ -#define WM8915_DAC1L_MUTE 0x0200 /* DAC1L_MUTE */ -#define WM8915_DAC1L_MUTE_MASK 0x0200 /* DAC1L_MUTE */ -#define WM8915_DAC1L_MUTE_SHIFT 9 /* DAC1L_MUTE */ -#define WM8915_DAC1L_MUTE_WIDTH 1 /* DAC1L_MUTE */ -#define WM8915_DAC1_VU 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_MASK 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_SHIFT 8 /* DAC1_VU */ -#define WM8915_DAC1_VU_WIDTH 1 /* DAC1_VU */ -#define WM8915_DAC1L_VOL_MASK 0x00FF /* DAC1L_VOL - [7:0] */ -#define WM8915_DAC1L_VOL_SHIFT 0 /* DAC1L_VOL - [7:0] */ -#define WM8915_DAC1L_VOL_WIDTH 8 /* DAC1L_VOL - [7:0] */ - -/* - * R25 (0x19) - DAC1 Right Volume - */ -#define WM8915_DAC1R_MUTE 0x0200 /* DAC1R_MUTE */ -#define WM8915_DAC1R_MUTE_MASK 0x0200 /* DAC1R_MUTE */ -#define WM8915_DAC1R_MUTE_SHIFT 9 /* DAC1R_MUTE */ -#define WM8915_DAC1R_MUTE_WIDTH 1 /* DAC1R_MUTE */ -#define WM8915_DAC1_VU 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_MASK 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_SHIFT 8 /* DAC1_VU */ -#define WM8915_DAC1_VU_WIDTH 1 /* DAC1_VU */ -#define WM8915_DAC1R_VOL_MASK 0x00FF /* DAC1R_VOL - [7:0] */ -#define WM8915_DAC1R_VOL_SHIFT 0 /* DAC1R_VOL - [7:0] */ -#define WM8915_DAC1R_VOL_WIDTH 8 /* DAC1R_VOL - [7:0] */ - -/* - * R26 (0x1A) - DAC2 Left Volume - */ -#define WM8915_DAC2L_MUTE 0x0200 /* DAC2L_MUTE */ -#define WM8915_DAC2L_MUTE_MASK 0x0200 /* DAC2L_MUTE */ -#define WM8915_DAC2L_MUTE_SHIFT 9 /* DAC2L_MUTE */ -#define WM8915_DAC2L_MUTE_WIDTH 1 /* DAC2L_MUTE */ -#define WM8915_DAC2_VU 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_MASK 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_SHIFT 8 /* DAC2_VU */ -#define WM8915_DAC2_VU_WIDTH 1 /* DAC2_VU */ -#define WM8915_DAC2L_VOL_MASK 0x00FF /* DAC2L_VOL - [7:0] */ -#define WM8915_DAC2L_VOL_SHIFT 0 /* DAC2L_VOL - [7:0] */ -#define WM8915_DAC2L_VOL_WIDTH 8 /* DAC2L_VOL - [7:0] */ - -/* - * R27 (0x1B) - 
DAC2 Right Volume - */ -#define WM8915_DAC2R_MUTE 0x0200 /* DAC2R_MUTE */ -#define WM8915_DAC2R_MUTE_MASK 0x0200 /* DAC2R_MUTE */ -#define WM8915_DAC2R_MUTE_SHIFT 9 /* DAC2R_MUTE */ -#define WM8915_DAC2R_MUTE_WIDTH 1 /* DAC2R_MUTE */ -#define WM8915_DAC2_VU 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_MASK 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_SHIFT 8 /* DAC2_VU */ -#define WM8915_DAC2_VU_WIDTH 1 /* DAC2_VU */ -#define WM8915_DAC2R_VOL_MASK 0x00FF /* DAC2R_VOL - [7:0] */ -#define WM8915_DAC2R_VOL_SHIFT 0 /* DAC2R_VOL - [7:0] */ -#define WM8915_DAC2R_VOL_WIDTH 8 /* DAC2R_VOL - [7:0] */ - -/* - * R28 (0x1C) - Output1 Left Volume - */ -#define WM8915_DAC1_VU 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_MASK 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_SHIFT 8 /* DAC1_VU */ -#define WM8915_DAC1_VU_WIDTH 1 /* DAC1_VU */ -#define WM8915_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */ -#define WM8915_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */ -#define WM8915_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */ -#define WM8915_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */ -#define WM8915_HPOUT1L_VOL_MASK 0x000F /* HPOUT1L_VOL - [3:0] */ -#define WM8915_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [3:0] */ -#define WM8915_HPOUT1L_VOL_WIDTH 4 /* HPOUT1L_VOL - [3:0] */ - -/* - * R29 (0x1D) - Output1 Right Volume - */ -#define WM8915_DAC1_VU 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_MASK 0x0100 /* DAC1_VU */ -#define WM8915_DAC1_VU_SHIFT 8 /* DAC1_VU */ -#define WM8915_DAC1_VU_WIDTH 1 /* DAC1_VU */ -#define WM8915_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */ -#define WM8915_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */ -#define WM8915_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */ -#define WM8915_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */ -#define WM8915_HPOUT1R_VOL_MASK 0x000F /* HPOUT1R_VOL - [3:0] */ -#define WM8915_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [3:0] */ -#define WM8915_HPOUT1R_VOL_WIDTH 4 /* HPOUT1R_VOL - [3:0] */ - -/* - * R30 (0x1E) - Output2 Left Volume - */ -#define WM8915_DAC2_VU 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_MASK 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_SHIFT 8 /* DAC2_VU */ -#define WM8915_DAC2_VU_WIDTH 1 /* DAC2_VU */ -#define WM8915_HPOUT2L_ZC 0x0080 /* HPOUT2L_ZC */ -#define WM8915_HPOUT2L_ZC_MASK 0x0080 /* HPOUT2L_ZC */ -#define WM8915_HPOUT2L_ZC_SHIFT 7 /* HPOUT2L_ZC */ -#define WM8915_HPOUT2L_ZC_WIDTH 1 /* HPOUT2L_ZC */ -#define WM8915_HPOUT2L_VOL_MASK 0x000F /* HPOUT2L_VOL - [3:0] */ -#define WM8915_HPOUT2L_VOL_SHIFT 0 /* HPOUT2L_VOL - [3:0] */ -#define WM8915_HPOUT2L_VOL_WIDTH 4 /* HPOUT2L_VOL - [3:0] */ - -/* - * R31 (0x1F) - Output2 Right Volume - */ -#define WM8915_DAC2_VU 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_MASK 0x0100 /* DAC2_VU */ -#define WM8915_DAC2_VU_SHIFT 8 /* DAC2_VU */ -#define WM8915_DAC2_VU_WIDTH 1 /* DAC2_VU */ -#define WM8915_HPOUT2R_ZC 0x0080 /* HPOUT2R_ZC */ -#define WM8915_HPOUT2R_ZC_MASK 0x0080 /* HPOUT2R_ZC */ -#define WM8915_HPOUT2R_ZC_SHIFT 7 /* HPOUT2R_ZC */ -#define WM8915_HPOUT2R_ZC_WIDTH 1 /* HPOUT2R_ZC */ -#define WM8915_HPOUT2R_VOL_MASK 0x000F /* HPOUT2R_VOL - [3:0] */ -#define WM8915_HPOUT2R_VOL_SHIFT 0 /* HPOUT2R_VOL - [3:0] */ -#define WM8915_HPOUT2R_VOL_WIDTH 4 /* HPOUT2R_VOL - [3:0] */ - -/* - * R32 (0x20) - MICBIAS (1) - */ -#define WM8915_MICB1_RATE 0x0020 /* MICB1_RATE */ -#define WM8915_MICB1_RATE_MASK 0x0020 /* MICB1_RATE */ -#define WM8915_MICB1_RATE_SHIFT 5 /* MICB1_RATE */ -#define WM8915_MICB1_RATE_WIDTH 1 /* MICB1_RATE */ -#define WM8915_MICB1_MODE 0x0010 /* MICB1_MODE */ -#define WM8915_MICB1_MODE_MASK 0x0010 /* MICB1_MODE */ -#define WM8915_MICB1_MODE_SHIFT 4 /* MICB1_MODE */ 
-#define WM8915_MICB1_MODE_WIDTH 1 /* MICB1_MODE */
-#define WM8915_MICB1_LVL_MASK 0x000E /* MICB1_LVL - [3:1] */
-#define WM8915_MICB1_LVL_SHIFT 1 /* MICB1_LVL - [3:1] */
-#define WM8915_MICB1_LVL_WIDTH 3 /* MICB1_LVL - [3:1] */
-#define WM8915_MICB1_DISCH 0x0001 /* MICB1_DISCH */
-#define WM8915_MICB1_DISCH_MASK 0x0001 /* MICB1_DISCH */
-#define WM8915_MICB1_DISCH_SHIFT 0 /* MICB1_DISCH */
-#define WM8915_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */
-
-/*
- * R33 (0x21) - MICBIAS (2)
- */
-#define WM8915_MICB2_RATE 0x0020 /* MICB2_RATE */
-#define WM8915_MICB2_RATE_MASK 0x0020 /* MICB2_RATE */
-#define WM8915_MICB2_RATE_SHIFT 5 /* MICB2_RATE */
-#define WM8915_MICB2_RATE_WIDTH 1 /* MICB2_RATE */
-#define WM8915_MICB2_MODE 0x0010 /* MICB2_MODE */
-#define WM8915_MICB2_MODE_MASK 0x0010 /* MICB2_MODE */
-#define WM8915_MICB2_MODE_SHIFT 4 /* MICB2_MODE */
-#define WM8915_MICB2_MODE_WIDTH 1 /* MICB2_MODE */
-#define WM8915_MICB2_LVL_MASK 0x000E /* MICB2_LVL - [3:1] */
-#define WM8915_MICB2_LVL_SHIFT 1 /* MICB2_LVL - [3:1] */
-#define WM8915_MICB2_LVL_WIDTH 3 /* MICB2_LVL - [3:1] */
-#define WM8915_MICB2_DISCH 0x0001 /* MICB2_DISCH */
-#define WM8915_MICB2_DISCH_MASK 0x0001 /* MICB2_DISCH */
-#define WM8915_MICB2_DISCH_SHIFT 0 /* MICB2_DISCH */
-#define WM8915_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
-
-/*
- * R40 (0x28) - LDO 1
- */
-#define WM8915_LDO1_MODE 0x0020 /* LDO1_MODE */
-#define WM8915_LDO1_MODE_MASK 0x0020 /* LDO1_MODE */
-#define WM8915_LDO1_MODE_SHIFT 5 /* LDO1_MODE */
-#define WM8915_LDO1_MODE_WIDTH 1 /* LDO1_MODE */
-#define WM8915_LDO1_VSEL_MASK 0x0006 /* LDO1_VSEL - [2:1] */
-#define WM8915_LDO1_VSEL_SHIFT 1 /* LDO1_VSEL - [2:1] */
-#define WM8915_LDO1_VSEL_WIDTH 2 /* LDO1_VSEL - [2:1] */
-#define WM8915_LDO1_DISCH 0x0001 /* LDO1_DISCH */
-#define WM8915_LDO1_DISCH_MASK 0x0001 /* LDO1_DISCH */
-#define WM8915_LDO1_DISCH_SHIFT 0 /* LDO1_DISCH */
-#define WM8915_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */
-
-/*
- * R41 (0x29) - LDO 2
- */
-#define WM8915_LDO2_MODE 0x0020 /* LDO2_MODE */
-#define WM8915_LDO2_MODE_MASK 0x0020 /* LDO2_MODE */
-#define WM8915_LDO2_MODE_SHIFT 5 /* LDO2_MODE */
-#define WM8915_LDO2_MODE_WIDTH 1 /* LDO2_MODE */
-#define WM8915_LDO2_VSEL_MASK 0x001E /* LDO2_VSEL - [4:1] */
-#define WM8915_LDO2_VSEL_SHIFT 1 /* LDO2_VSEL - [4:1] */
-#define WM8915_LDO2_VSEL_WIDTH 4 /* LDO2_VSEL - [4:1] */
-#define WM8915_LDO2_DISCH 0x0001 /* LDO2_DISCH */
-#define WM8915_LDO2_DISCH_MASK 0x0001 /* LDO2_DISCH */
-#define WM8915_LDO2_DISCH_SHIFT 0 /* LDO2_DISCH */
-#define WM8915_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */
-
-/*
- * R48 (0x30) - Accessory Detect Mode 1
- */
-#define WM8915_JD_MODE_MASK 0x0003 /* JD_MODE - [1:0] */
-#define WM8915_JD_MODE_SHIFT 0 /* JD_MODE - [1:0] */
-#define WM8915_JD_MODE_WIDTH 2 /* JD_MODE - [1:0] */
-
-/*
- * R49 (0x31) - Accessory Detect Mode 2
- */
-#define WM8915_HPOUT1FB_SRC 0x0004 /* HPOUT1FB_SRC */
-#define WM8915_HPOUT1FB_SRC_MASK 0x0004 /* HPOUT1FB_SRC */
-#define WM8915_HPOUT1FB_SRC_SHIFT 2 /* HPOUT1FB_SRC */
-#define WM8915_HPOUT1FB_SRC_WIDTH 1 /* HPOUT1FB_SRC */
-#define WM8915_MICD_SRC 0x0002 /* MICD_SRC */
-#define WM8915_MICD_SRC_MASK 0x0002 /* MICD_SRC */
-#define WM8915_MICD_SRC_SHIFT 1 /* MICD_SRC */
-#define WM8915_MICD_SRC_WIDTH 1 /* MICD_SRC */
-#define WM8915_MICD_BIAS_SRC 0x0001 /* MICD_BIAS_SRC */
-#define WM8915_MICD_BIAS_SRC_MASK 0x0001 /* MICD_BIAS_SRC */
-#define WM8915_MICD_BIAS_SRC_SHIFT 0 /* MICD_BIAS_SRC */
-#define WM8915_MICD_BIAS_SRC_WIDTH 1 /* MICD_BIAS_SRC */
-
-/*
- * R52 (0x34) - Headphone Detect 1
- */
-#define WM8915_HP_HOLDTIME_MASK 0x00E0 /* HP_HOLDTIME - [7:5] */
-#define WM8915_HP_HOLDTIME_SHIFT 5 /* HP_HOLDTIME - [7:5] */
-#define WM8915_HP_HOLDTIME_WIDTH 3 /* HP_HOLDTIME - [7:5] */
-#define WM8915_HP_CLK_DIV_MASK 0x0018 /* HP_CLK_DIV - [4:3] */
-#define WM8915_HP_CLK_DIV_SHIFT 3 /* HP_CLK_DIV - [4:3] */
-#define WM8915_HP_CLK_DIV_WIDTH 2 /* HP_CLK_DIV - [4:3] */
-#define WM8915_HP_STEP_SIZE 0x0002 /* HP_STEP_SIZE */
-#define WM8915_HP_STEP_SIZE_MASK 0x0002 /* HP_STEP_SIZE */
-#define WM8915_HP_STEP_SIZE_SHIFT 1 /* HP_STEP_SIZE */
-#define WM8915_HP_STEP_SIZE_WIDTH 1 /* HP_STEP_SIZE */
-#define WM8915_HP_POLL 0x0001 /* HP_POLL */
-#define WM8915_HP_POLL_MASK 0x0001 /* HP_POLL */
-#define WM8915_HP_POLL_SHIFT 0 /* HP_POLL */
-#define WM8915_HP_POLL_WIDTH 1 /* HP_POLL */
-
-/*
- * R53 (0x35) - Headphone Detect 2
- */
-#define WM8915_HP_DONE 0x0080 /* HP_DONE */
-#define WM8915_HP_DONE_MASK 0x0080 /* HP_DONE */
-#define WM8915_HP_DONE_SHIFT 7 /* HP_DONE */
-#define WM8915_HP_DONE_WIDTH 1 /* HP_DONE */
-#define WM8915_HP_LVL_MASK 0x007F /* HP_LVL - [6:0] */
-#define WM8915_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */
-#define WM8915_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */
-
-/*
- * R56 (0x38) - Mic Detect 1
- */
-#define WM8915_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */
-#define WM8915_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */
-#define WM8915_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */
-#define WM8915_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */
-#define WM8915_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */
-#define WM8915_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */
-#define WM8915_MICD_DBTIME 0x0002 /* MICD_DBTIME */
-#define WM8915_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */
-#define WM8915_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */
-#define WM8915_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */
-#define WM8915_MICD_ENA 0x0001 /* MICD_ENA */
-#define WM8915_MICD_ENA_MASK 0x0001 /* MICD_ENA */
-#define WM8915_MICD_ENA_SHIFT 0 /* MICD_ENA */
-#define WM8915_MICD_ENA_WIDTH 1 /* MICD_ENA */
-
-/*
- * R57 (0x39) - Mic Detect 2
- */
-#define WM8915_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */
-#define WM8915_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */
-#define WM8915_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */
-
-/*
- * R58 (0x3A) - Mic Detect 3
- */
-#define WM8915_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
-#define WM8915_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
-#define WM8915_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
-#define WM8915_MICD_VALID 0x0002 /* MICD_VALID */
-#define WM8915_MICD_VALID_MASK 0x0002 /* MICD_VALID */
-#define WM8915_MICD_VALID_SHIFT 1 /* MICD_VALID */
-#define WM8915_MICD_VALID_WIDTH 1 /* MICD_VALID */
-#define WM8915_MICD_STS 0x0001 /* MICD_STS */
-#define WM8915_MICD_STS_MASK 0x0001 /* MICD_STS */
-#define WM8915_MICD_STS_SHIFT 0 /* MICD_STS */
-#define WM8915_MICD_STS_WIDTH 1 /* MICD_STS */
-
-/*
- * R64 (0x40) - Charge Pump (1)
- */
-#define WM8915_CP_ENA 0x8000 /* CP_ENA */
-#define WM8915_CP_ENA_MASK 0x8000 /* CP_ENA */
-#define WM8915_CP_ENA_SHIFT 15 /* CP_ENA */
-#define WM8915_CP_ENA_WIDTH 1 /* CP_ENA */
-
-/*
- * R65 (0x41) - Charge Pump (2)
- */
-#define WM8915_CP_DISCH 0x8000 /* CP_DISCH */
-#define WM8915_CP_DISCH_MASK 0x8000 /* CP_DISCH */
-#define WM8915_CP_DISCH_SHIFT 15 /* CP_DISCH */
-#define WM8915_CP_DISCH_WIDTH 1 /* CP_DISCH */
-
-/*
- * R80 (0x50) - DC Servo (1)
- */
-#define WM8915_DCS_ENA_CHAN_3 0x0008 /* DCS_ENA_CHAN_3 */
-#define WM8915_DCS_ENA_CHAN_3_MASK 0x0008 /* DCS_ENA_CHAN_3 */
-#define WM8915_DCS_ENA_CHAN_3_SHIFT 3 /* DCS_ENA_CHAN_3 */
-#define WM8915_DCS_ENA_CHAN_3_WIDTH 1 /* DCS_ENA_CHAN_3 */
-#define WM8915_DCS_ENA_CHAN_2 0x0004 /* DCS_ENA_CHAN_2 */
-#define WM8915_DCS_ENA_CHAN_2_MASK 0x0004 /* DCS_ENA_CHAN_2 */
-#define WM8915_DCS_ENA_CHAN_2_SHIFT 2 /* DCS_ENA_CHAN_2 */
-#define WM8915_DCS_ENA_CHAN_2_WIDTH 1 /* DCS_ENA_CHAN_2 */
-#define WM8915_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
-#define WM8915_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
-#define WM8915_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
-#define WM8915_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
-#define WM8915_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
-#define WM8915_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
-#define WM8915_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
-#define WM8915_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
-
-/*
- * R81 (0x51) - DC Servo (2)
- */
-#define WM8915_DCS_TRIG_SINGLE_3 0x8000 /* DCS_TRIG_SINGLE_3 */
-#define WM8915_DCS_TRIG_SINGLE_3_MASK 0x8000 /* DCS_TRIG_SINGLE_3 */
-#define WM8915_DCS_TRIG_SINGLE_3_SHIFT 15 /* DCS_TRIG_SINGLE_3 */
-#define WM8915_DCS_TRIG_SINGLE_3_WIDTH 1 /* DCS_TRIG_SINGLE_3 */
-#define WM8915_DCS_TRIG_SINGLE_2 0x4000 /* DCS_TRIG_SINGLE_2 */
-#define WM8915_DCS_TRIG_SINGLE_2_MASK 0x4000 /* DCS_TRIG_SINGLE_2 */
-#define WM8915_DCS_TRIG_SINGLE_2_SHIFT 14 /* DCS_TRIG_SINGLE_2 */
-#define WM8915_DCS_TRIG_SINGLE_2_WIDTH 1 /* DCS_TRIG_SINGLE_2 */
-#define WM8915_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
-#define WM8915_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
-#define WM8915_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
-#define WM8915_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
-#define WM8915_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
-#define WM8915_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
-#define WM8915_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
-#define WM8915_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
-#define WM8915_DCS_TRIG_SERIES_3 0x0800 /* DCS_TRIG_SERIES_3 */
-#define WM8915_DCS_TRIG_SERIES_3_MASK 0x0800 /* DCS_TRIG_SERIES_3 */
-#define WM8915_DCS_TRIG_SERIES_3_SHIFT 11 /* DCS_TRIG_SERIES_3 */
-#define WM8915_DCS_TRIG_SERIES_3_WIDTH 1 /* DCS_TRIG_SERIES_3 */
-#define WM8915_DCS_TRIG_SERIES_2 0x0400 /* DCS_TRIG_SERIES_2 */
-#define WM8915_DCS_TRIG_SERIES_2_MASK 0x0400 /* DCS_TRIG_SERIES_2 */
-#define WM8915_DCS_TRIG_SERIES_2_SHIFT 10 /* DCS_TRIG_SERIES_2 */
-#define WM8915_DCS_TRIG_SERIES_2_WIDTH 1 /* DCS_TRIG_SERIES_2 */
-#define WM8915_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
-#define WM8915_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
-#define WM8915_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
-#define WM8915_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
-#define WM8915_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
-#define WM8915_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
-#define WM8915_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
-#define WM8915_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
-#define WM8915_DCS_TRIG_STARTUP_3 0x0080 /* DCS_TRIG_STARTUP_3 */
-#define WM8915_DCS_TRIG_STARTUP_3_MASK 0x0080 /* DCS_TRIG_STARTUP_3 */
-#define WM8915_DCS_TRIG_STARTUP_3_SHIFT 7 /* DCS_TRIG_STARTUP_3 */
-#define WM8915_DCS_TRIG_STARTUP_3_WIDTH 1 /* DCS_TRIG_STARTUP_3 */
-#define WM8915_DCS_TRIG_STARTUP_2 0x0040 /* DCS_TRIG_STARTUP_2 */
-#define WM8915_DCS_TRIG_STARTUP_2_MASK 0x0040 /* DCS_TRIG_STARTUP_2 */
-#define WM8915_DCS_TRIG_STARTUP_2_SHIFT 6 /* DCS_TRIG_STARTUP_2 */
-#define WM8915_DCS_TRIG_STARTUP_2_WIDTH 1 /* DCS_TRIG_STARTUP_2 */
-#define WM8915_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
-#define WM8915_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
-#define WM8915_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
-#define WM8915_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
-#define WM8915_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
-#define WM8915_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
-#define WM8915_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
-#define WM8915_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
-#define WM8915_DCS_TRIG_DAC_WR_3 0x0008 /* DCS_TRIG_DAC_WR_3 */
-#define WM8915_DCS_TRIG_DAC_WR_3_MASK 0x0008 /* DCS_TRIG_DAC_WR_3 */
-#define WM8915_DCS_TRIG_DAC_WR_3_SHIFT 3 /* DCS_TRIG_DAC_WR_3 */
-#define WM8915_DCS_TRIG_DAC_WR_3_WIDTH 1 /* DCS_TRIG_DAC_WR_3 */
-#define WM8915_DCS_TRIG_DAC_WR_2 0x0004 /* DCS_TRIG_DAC_WR_2 */
-#define WM8915_DCS_TRIG_DAC_WR_2_MASK 0x0004 /* DCS_TRIG_DAC_WR_2 */
-#define WM8915_DCS_TRIG_DAC_WR_2_SHIFT 2 /* DCS_TRIG_DAC_WR_2 */
-#define WM8915_DCS_TRIG_DAC_WR_2_WIDTH 1 /* DCS_TRIG_DAC_WR_2 */
-#define WM8915_DCS_TRIG_DAC_WR_1 0x0002 /* DCS_TRIG_DAC_WR_1 */
-#define WM8915_DCS_TRIG_DAC_WR_1_MASK 0x0002 /* DCS_TRIG_DAC_WR_1 */
-#define WM8915_DCS_TRIG_DAC_WR_1_SHIFT 1 /* DCS_TRIG_DAC_WR_1 */
-#define WM8915_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
-#define WM8915_DCS_TRIG_DAC_WR_0 0x0001 /* DCS_TRIG_DAC_WR_0 */
-#define WM8915_DCS_TRIG_DAC_WR_0_MASK 0x0001 /* DCS_TRIG_DAC_WR_0 */
-#define WM8915_DCS_TRIG_DAC_WR_0_SHIFT 0 /* DCS_TRIG_DAC_WR_0 */
-#define WM8915_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
-
-/*
- * R82 (0x52) - DC Servo (3)
- */
-#define WM8915_DCS_TIMER_PERIOD_23_MASK 0x0F00 /* DCS_TIMER_PERIOD_23 - [11:8] */
-#define WM8915_DCS_TIMER_PERIOD_23_SHIFT 8 /* DCS_TIMER_PERIOD_23 - [11:8] */
-#define WM8915_DCS_TIMER_PERIOD_23_WIDTH 4 /* DCS_TIMER_PERIOD_23 - [11:8] */
-#define WM8915_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
-#define WM8915_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
-#define WM8915_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
-
-/*
- * R84 (0x54) - DC Servo (5)
- */
-#define WM8915_DCS_SERIES_NO_23_MASK 0x7F00 /* DCS_SERIES_NO_23 - [14:8] */
-#define WM8915_DCS_SERIES_NO_23_SHIFT 8 /* DCS_SERIES_NO_23 - [14:8] */
-#define WM8915_DCS_SERIES_NO_23_WIDTH 7 /* DCS_SERIES_NO_23 - [14:8] */
-#define WM8915_DCS_SERIES_NO_01_MASK 0x007F /* DCS_SERIES_NO_01 - [6:0] */
-#define WM8915_DCS_SERIES_NO_01_SHIFT 0 /* DCS_SERIES_NO_01 - [6:0] */
-#define WM8915_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [6:0] */
-
-/*
- * R85 (0x55) - DC Servo (6)
- */
-#define WM8915_DCS_DAC_WR_VAL_3_MASK 0xFF00 /* DCS_DAC_WR_VAL_3 - [15:8] */
-#define WM8915_DCS_DAC_WR_VAL_3_SHIFT 8 /* DCS_DAC_WR_VAL_3 - [15:8] */
-#define WM8915_DCS_DAC_WR_VAL_3_WIDTH 8 /* DCS_DAC_WR_VAL_3 - [15:8] */
-#define WM8915_DCS_DAC_WR_VAL_2_MASK 0x00FF /* DCS_DAC_WR_VAL_2 - [7:0] */
-#define WM8915_DCS_DAC_WR_VAL_2_SHIFT 0 /* DCS_DAC_WR_VAL_2 - [7:0] */
-#define WM8915_DCS_DAC_WR_VAL_2_WIDTH 8 /* DCS_DAC_WR_VAL_2 - [7:0] */
-
-/*
- * R86 (0x56) - DC Servo (7)
- */
-#define WM8915_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */
-#define WM8915_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
-#define WM8915_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
-#define WM8915_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
-#define WM8915_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
-#define WM8915_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
-
-/*
- * R87 (0x57) - DC Servo Readback 0
- */
-#define WM8915_DCS_CAL_COMPLETE_MASK 0x0F00 /* DCS_CAL_COMPLETE - [11:8] */
-#define WM8915_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [11:8] */
-#define WM8915_DCS_CAL_COMPLETE_WIDTH 4 /* DCS_CAL_COMPLETE - [11:8] */
-#define WM8915_DCS_DAC_WR_COMPLETE_MASK 0x00F0 /* DCS_DAC_WR_COMPLETE - [7:4] */
-#define WM8915_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [7:4] */
-#define WM8915_DCS_DAC_WR_COMPLETE_WIDTH 4 /* DCS_DAC_WR_COMPLETE - [7:4] */
-#define WM8915_DCS_STARTUP_COMPLETE_MASK 0x000F /* DCS_STARTUP_COMPLETE - [3:0] */
-#define WM8915_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [3:0] */
-#define WM8915_DCS_STARTUP_COMPLETE_WIDTH 4 /* DCS_STARTUP_COMPLETE - [3:0] */
-
-/*
- * R96 (0x60) - Analogue HP (1)
- */
-#define WM8915_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */
-#define WM8915_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */
-#define WM8915_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */
-#define WM8915_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */
-#define WM8915_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */
-#define WM8915_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */
-#define WM8915_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */
-#define WM8915_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */
-#define WM8915_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */
-#define WM8915_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */
-#define WM8915_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */
-#define WM8915_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */
-#define WM8915_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */
-#define WM8915_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */
-#define WM8915_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */
-#define WM8915_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */
-#define WM8915_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */
-#define WM8915_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */
-#define WM8915_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */
-#define WM8915_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */
-#define WM8915_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */
-#define WM8915_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */
-#define WM8915_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */
-#define WM8915_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */
-
-/*
- * R97 (0x61) - Analogue HP (2)
- */
-#define WM8915_HPOUT2L_RMV_SHORT 0x0080 /* HPOUT2L_RMV_SHORT */
-#define WM8915_HPOUT2L_RMV_SHORT_MASK 0x0080 /* HPOUT2L_RMV_SHORT */
-#define WM8915_HPOUT2L_RMV_SHORT_SHIFT 7 /* HPOUT2L_RMV_SHORT */
-#define WM8915_HPOUT2L_RMV_SHORT_WIDTH 1 /* HPOUT2L_RMV_SHORT */
-#define WM8915_HPOUT2L_OUTP 0x0040 /* HPOUT2L_OUTP */
-#define WM8915_HPOUT2L_OUTP_MASK 0x0040 /* HPOUT2L_OUTP */
-#define WM8915_HPOUT2L_OUTP_SHIFT 6 /* HPOUT2L_OUTP */
-#define WM8915_HPOUT2L_OUTP_WIDTH 1 /* HPOUT2L_OUTP */
-#define WM8915_HPOUT2L_DLY 0x0020 /* HPOUT2L_DLY */
-#define WM8915_HPOUT2L_DLY_MASK 0x0020 /* HPOUT2L_DLY */
-#define WM8915_HPOUT2L_DLY_SHIFT 5 /* HPOUT2L_DLY */
-#define WM8915_HPOUT2L_DLY_WIDTH 1 /* HPOUT2L_DLY */
-#define WM8915_HPOUT2R_RMV_SHORT 0x0008 /* HPOUT2R_RMV_SHORT */
-#define WM8915_HPOUT2R_RMV_SHORT_MASK 0x0008 /* HPOUT2R_RMV_SHORT */
-#define WM8915_HPOUT2R_RMV_SHORT_SHIFT 3 /* HPOUT2R_RMV_SHORT */
-#define WM8915_HPOUT2R_RMV_SHORT_WIDTH 1 /* HPOUT2R_RMV_SHORT */
-#define WM8915_HPOUT2R_OUTP 0x0004 /* HPOUT2R_OUTP */
-#define WM8915_HPOUT2R_OUTP_MASK 0x0004 /* HPOUT2R_OUTP */
-#define WM8915_HPOUT2R_OUTP_SHIFT 2 /* HPOUT2R_OUTP */
-#define WM8915_HPOUT2R_OUTP_WIDTH 1 /* HPOUT2R_OUTP */
-#define WM8915_HPOUT2R_DLY 0x0002 /* HPOUT2R_DLY */
-#define WM8915_HPOUT2R_DLY_MASK 0x0002 /* HPOUT2R_DLY */
-#define WM8915_HPOUT2R_DLY_SHIFT 1 /* HPOUT2R_DLY */
-#define WM8915_HPOUT2R_DLY_WIDTH 1 /* HPOUT2R_DLY */
-
-/*
- * R256 (0x100) - Chip Revision
- */
-#define WM8915_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */
-#define WM8915_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */
-#define WM8915_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */
-
-/*
- * R257 (0x101) - Control Interface (1)
- */
-#define WM8915_AUTO_INC 0x0004 /* AUTO_INC */
-#define WM8915_AUTO_INC_MASK 0x0004 /* AUTO_INC */
-#define WM8915_AUTO_INC_SHIFT 2 /* AUTO_INC */
-#define WM8915_AUTO_INC_WIDTH 1 /* AUTO_INC */
-
-/*
- * R272 (0x110) - Write Sequencer Ctrl (1)
- */
-#define WM8915_WSEQ_ENA 0x8000 /* WSEQ_ENA */
-#define WM8915_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */
-#define WM8915_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */
-#define WM8915_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
-#define WM8915_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
-#define WM8915_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
-#define WM8915_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
-#define WM8915_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
-#define WM8915_WSEQ_START 0x0100 /* WSEQ_START */
-#define WM8915_WSEQ_START_MASK 0x0100 /* WSEQ_START */
-#define WM8915_WSEQ_START_SHIFT 8 /* WSEQ_START */
-#define WM8915_WSEQ_START_WIDTH 1 /* WSEQ_START */
-#define WM8915_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */
-#define WM8915_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */
-#define WM8915_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */
-
-/*
- * R273 (0x111) - Write Sequencer Ctrl (2)
- */
-#define WM8915_WSEQ_BUSY 0x0100 /* WSEQ_BUSY */
-#define WM8915_WSEQ_BUSY_MASK 0x0100 /* WSEQ_BUSY */
-#define WM8915_WSEQ_BUSY_SHIFT 8 /* WSEQ_BUSY */
-#define WM8915_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
-#define WM8915_WSEQ_CURRENT_INDEX_MASK 0x007F /* WSEQ_CURRENT_INDEX - [6:0] */
-#define WM8915_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [6:0] */
-#define WM8915_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [6:0] */
-
-/*
- * R512 (0x200) - AIF Clocking (1)
- */
-#define WM8915_SYSCLK_SRC_MASK 0x0018 /* SYSCLK_SRC - [4:3] */
-#define WM8915_SYSCLK_SRC_SHIFT 3 /* SYSCLK_SRC - [4:3] */
-#define WM8915_SYSCLK_SRC_WIDTH 2 /* SYSCLK_SRC - [4:3] */
-#define WM8915_SYSCLK_INV 0x0004 /* SYSCLK_INV */
-#define WM8915_SYSCLK_INV_MASK 0x0004 /* SYSCLK_INV */
-#define WM8915_SYSCLK_INV_SHIFT 2 /* SYSCLK_INV */
-#define WM8915_SYSCLK_INV_WIDTH 1 /* SYSCLK_INV */
-#define WM8915_SYSCLK_DIV 0x0002 /* SYSCLK_DIV */
-#define WM8915_SYSCLK_DIV_MASK 0x0002 /* SYSCLK_DIV */
-#define WM8915_SYSCLK_DIV_SHIFT 1 /* SYSCLK_DIV */
-#define WM8915_SYSCLK_DIV_WIDTH 1 /* SYSCLK_DIV */
-#define WM8915_SYSCLK_ENA 0x0001 /* SYSCLK_ENA */
-#define WM8915_SYSCLK_ENA_MASK 0x0001 /* SYSCLK_ENA */
-#define WM8915_SYSCLK_ENA_SHIFT 0 /* SYSCLK_ENA */
-#define WM8915_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */
-
-/*
- * R513 (0x201) - AIF Clocking (2)
- */
-#define WM8915_DSP2_DIV_MASK 0x0018 /* DSP2_DIV - [4:3] */
-#define WM8915_DSP2_DIV_SHIFT 3 /* DSP2_DIV - [4:3] */
-#define WM8915_DSP2_DIV_WIDTH 2 /* DSP2_DIV - [4:3] */
-#define WM8915_DSP1_DIV_MASK 0x0003 /* DSP1_DIV - [1:0] */
-#define WM8915_DSP1_DIV_SHIFT 0 /* DSP1_DIV - [1:0] */
-#define WM8915_DSP1_DIV_WIDTH 2 /* DSP1_DIV - [1:0] */
-
-/*
- * R520 (0x208) - Clocking (1)
- */
-#define WM8915_LFCLK_ENA 0x0020 /* LFCLK_ENA */
-#define WM8915_LFCLK_ENA_MASK 0x0020 /* LFCLK_ENA */
-#define WM8915_LFCLK_ENA_SHIFT 5 /* LFCLK_ENA */
-#define WM8915_LFCLK_ENA_WIDTH 1 /* LFCLK_ENA */
-#define WM8915_TOCLK_ENA 0x0010 /* TOCLK_ENA */
-#define WM8915_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */
-#define WM8915_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */
-#define WM8915_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
-#define WM8915_AIFCLK_ENA 0x0004 /* AIFCLK_ENA */
-#define WM8915_AIFCLK_ENA_MASK 0x0004 /* AIFCLK_ENA */
-#define WM8915_AIFCLK_ENA_SHIFT 2 /* AIFCLK_ENA */
-#define WM8915_AIFCLK_ENA_WIDTH 1 /* AIFCLK_ENA */
-#define WM8915_SYSDSPCLK_ENA 0x0002 /* SYSDSPCLK_ENA */
-#define WM8915_SYSDSPCLK_ENA_MASK 0x0002 /* SYSDSPCLK_ENA */
-#define WM8915_SYSDSPCLK_ENA_SHIFT 1 /* SYSDSPCLK_ENA */
-#define WM8915_SYSDSPCLK_ENA_WIDTH 1 /* SYSDSPCLK_ENA */
-
-/*
- * R521 (0x209) - Clocking (2)
- */
-#define WM8915_TOCLK_DIV_MASK 0x0700 /* TOCLK_DIV - [10:8] */
-#define WM8915_TOCLK_DIV_SHIFT 8 /* TOCLK_DIV - [10:8] */
-#define WM8915_TOCLK_DIV_WIDTH 3 /* TOCLK_DIV - [10:8] */
-#define WM8915_DBCLK_DIV_MASK 0x00F0 /* DBCLK_DIV - [7:4] */
-#define WM8915_DBCLK_DIV_SHIFT 4 /* DBCLK_DIV - [7:4] */
-#define WM8915_DBCLK_DIV_WIDTH 4 /* DBCLK_DIV - [7:4] */
-#define WM8915_OPCLK_DIV_MASK 0x0007 /* OPCLK_DIV - [2:0] */
-#define WM8915_OPCLK_DIV_SHIFT 0 /* OPCLK_DIV - [2:0] */
-#define WM8915_OPCLK_DIV_WIDTH 3 /* OPCLK_DIV - [2:0] */
-
-/*
- * R528 (0x210) - AIF Rate
- */
-#define WM8915_SYSCLK_RATE 0x0001 /* SYSCLK_RATE */
-#define WM8915_SYSCLK_RATE_MASK 0x0001 /* SYSCLK_RATE */
-#define WM8915_SYSCLK_RATE_SHIFT 0 /* SYSCLK_RATE */
-#define WM8915_SYSCLK_RATE_WIDTH 1 /* SYSCLK_RATE */
-
-/*
- * R544 (0x220) - FLL Control (1)
- */
-#define WM8915_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
-#define WM8915_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
-#define WM8915_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
-#define WM8915_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
-#define WM8915_FLL_ENA 0x0001 /* FLL_ENA */
-#define WM8915_FLL_ENA_MASK 0x0001 /* FLL_ENA */
-#define WM8915_FLL_ENA_SHIFT 0 /* FLL_ENA */
-#define WM8915_FLL_ENA_WIDTH 1 /* FLL_ENA */
-
-/*
- * R545 (0x221) - FLL Control (2)
- */
-#define WM8915_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */
-#define WM8915_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */
-#define WM8915_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */
-#define WM8915_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
-#define WM8915_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
-#define WM8915_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
-
-/*
- * R546 (0x222) - FLL Control (3)
- */
-#define WM8915_FLL_THETA_MASK 0xFFFF /* FLL_THETA - [15:0] */
-#define WM8915_FLL_THETA_SHIFT 0 /* FLL_THETA - [15:0] */
-#define WM8915_FLL_THETA_WIDTH 16 /* FLL_THETA - [15:0] */
-
-/*
- * R547 (0x223) - FLL Control (4)
- */
-#define WM8915_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
-#define WM8915_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
-#define WM8915_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
-#define WM8915_FLL_LOOP_GAIN_MASK 0x000F /* FLL_LOOP_GAIN - [3:0] */
-#define WM8915_FLL_LOOP_GAIN_SHIFT 0 /* FLL_LOOP_GAIN - [3:0] */
-#define WM8915_FLL_LOOP_GAIN_WIDTH 4 /* FLL_LOOP_GAIN - [3:0] */
-
-/*
- * R548 (0x224) - FLL Control (5)
- */
-#define WM8915_FLL_FRC_NCO_VAL_MASK 0x1F80 /* FLL_FRC_NCO_VAL - [12:7] */
-#define WM8915_FLL_FRC_NCO_VAL_SHIFT 7 /* FLL_FRC_NCO_VAL - [12:7] */
-#define WM8915_FLL_FRC_NCO_VAL_WIDTH 6 /* FLL_FRC_NCO_VAL - [12:7] */
-#define WM8915_FLL_FRC_NCO 0x0040 /* FLL_FRC_NCO */
-#define WM8915_FLL_FRC_NCO_MASK 0x0040 /* FLL_FRC_NCO */
-#define WM8915_FLL_FRC_NCO_SHIFT 6 /* FLL_FRC_NCO */
-#define WM8915_FLL_FRC_NCO_WIDTH 1 /* FLL_FRC_NCO */
-#define WM8915_FLL_REFCLK_DIV_MASK 0x0018 /* FLL_REFCLK_DIV - [4:3] */
-#define WM8915_FLL_REFCLK_DIV_SHIFT 3 /* FLL_REFCLK_DIV - [4:3] */
-#define WM8915_FLL_REFCLK_DIV_WIDTH 2 /* FLL_REFCLK_DIV - [4:3] */
-#define WM8915_FLL_REF_FREQ 0x0004 /* FLL_REF_FREQ */
-#define WM8915_FLL_REF_FREQ_MASK 0x0004 /* FLL_REF_FREQ */
-#define WM8915_FLL_REF_FREQ_SHIFT 2 /* FLL_REF_FREQ */
-#define WM8915_FLL_REF_FREQ_WIDTH 1 /* FLL_REF_FREQ */
-#define WM8915_FLL_REFCLK_SRC_MASK 0x0003 /* FLL_REFCLK_SRC - [1:0] */
-#define WM8915_FLL_REFCLK_SRC_SHIFT 0 /* FLL_REFCLK_SRC - [1:0] */
-#define WM8915_FLL_REFCLK_SRC_WIDTH 2 /* FLL_REFCLK_SRC - [1:0] */
-
-/*
- * R549 (0x225) - FLL Control (6)
- */
-#define WM8915_FLL_REFCLK_SRC_STS_MASK 0x000C /* FLL_REFCLK_SRC_STS - [3:2] */
-#define WM8915_FLL_REFCLK_SRC_STS_SHIFT 2 /* FLL_REFCLK_SRC_STS - [3:2] */
-#define WM8915_FLL_REFCLK_SRC_STS_WIDTH 2 /* FLL_REFCLK_SRC_STS - [3:2] */
-#define WM8915_FLL_SWITCH_CLK 0x0001 /* FLL_SWITCH_CLK */
-#define WM8915_FLL_SWITCH_CLK_MASK 0x0001 /* FLL_SWITCH_CLK */
-#define WM8915_FLL_SWITCH_CLK_SHIFT 0 /* FLL_SWITCH_CLK */
-#define WM8915_FLL_SWITCH_CLK_WIDTH 1 /* FLL_SWITCH_CLK */
-
-/*
- * R550 (0x226) - FLL EFS 1
- */
-#define WM8915_FLL_LAMBDA_MASK 0xFFFF /* FLL_LAMBDA - [15:0] */
-#define WM8915_FLL_LAMBDA_SHIFT 0 /* FLL_LAMBDA - [15:0] */
-#define WM8915_FLL_LAMBDA_WIDTH 16 /* FLL_LAMBDA - [15:0] */
-
-/*
- * R551 (0x227) - FLL EFS 2
- */
-#define WM8915_FLL_LFSR_SEL_MASK 0x0006 /* FLL_LFSR_SEL - [2:1] */
-#define WM8915_FLL_LFSR_SEL_SHIFT 1 /* FLL_LFSR_SEL - [2:1] */
-#define WM8915_FLL_LFSR_SEL_WIDTH 2 /* FLL_LFSR_SEL - [2:1] */
-#define WM8915_FLL_EFS_ENA 0x0001 /* FLL_EFS_ENA */
-#define WM8915_FLL_EFS_ENA_MASK 0x0001 /* FLL_EFS_ENA */
-#define WM8915_FLL_EFS_ENA_SHIFT 0 /* FLL_EFS_ENA */
-#define WM8915_FLL_EFS_ENA_WIDTH 1 /* FLL_EFS_ENA */
-
-/*
- * R768 (0x300) - AIF1 Control
- */
-#define WM8915_AIF1_TRI 0x0004 /* AIF1_TRI */
-#define WM8915_AIF1_TRI_MASK 0x0004 /* AIF1_TRI */
-#define WM8915_AIF1_TRI_SHIFT 2 /* AIF1_TRI */
-#define WM8915_AIF1_TRI_WIDTH 1 /* AIF1_TRI */
-#define WM8915_AIF1_FMT_MASK 0x0003 /* AIF1_FMT - [1:0] */
-#define WM8915_AIF1_FMT_SHIFT 0 /* AIF1_FMT - [1:0] */
-#define WM8915_AIF1_FMT_WIDTH 2 /* AIF1_FMT - [1:0] */
-
-/*
- * R769 (0x301) - AIF1 BCLK
- */
-#define WM8915_AIF1_BCLK_INV 0x0400 /* AIF1_BCLK_INV */
-#define WM8915_AIF1_BCLK_INV_MASK 0x0400 /* AIF1_BCLK_INV */
-#define WM8915_AIF1_BCLK_INV_SHIFT 10 /* AIF1_BCLK_INV */
-#define WM8915_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */
-#define WM8915_AIF1_BCLK_FRC 0x0200 /* AIF1_BCLK_FRC */
-#define WM8915_AIF1_BCLK_FRC_MASK 0x0200 /* AIF1_BCLK_FRC */
-#define WM8915_AIF1_BCLK_FRC_SHIFT 9 /* AIF1_BCLK_FRC */
-#define WM8915_AIF1_BCLK_FRC_WIDTH 1 /* AIF1_BCLK_FRC */
-#define WM8915_AIF1_BCLK_MSTR 0x0100 /* AIF1_BCLK_MSTR */
-#define WM8915_AIF1_BCLK_MSTR_MASK 0x0100 /* AIF1_BCLK_MSTR */
-#define WM8915_AIF1_BCLK_MSTR_SHIFT 8 /* AIF1_BCLK_MSTR */
-#define WM8915_AIF1_BCLK_MSTR_WIDTH 1 /* AIF1_BCLK_MSTR */
-#define WM8915_AIF1_BCLK_DIV_MASK 0x000F /* AIF1_BCLK_DIV - [3:0] */
-#define WM8915_AIF1_BCLK_DIV_SHIFT 0 /* AIF1_BCLK_DIV - [3:0] */
-#define WM8915_AIF1_BCLK_DIV_WIDTH 4 /* AIF1_BCLK_DIV - [3:0] */
-
-/*
- * R770 (0x302) - AIF1 TX LRCLK(1)
- */
-#define WM8915_AIF1TX_RATE_MASK 0x07FF /* AIF1TX_RATE - [10:0] */
-#define WM8915_AIF1TX_RATE_SHIFT 0 /* AIF1TX_RATE - [10:0] */
-#define WM8915_AIF1TX_RATE_WIDTH 11 /* AIF1TX_RATE - [10:0] */
-
-/*
- * R771 (0x303) - AIF1 TX LRCLK(2)
- */
-#define WM8915_AIF1TX_LRCLK_MODE 0x0008 /* AIF1TX_LRCLK_MODE */
-#define WM8915_AIF1TX_LRCLK_MODE_MASK 0x0008 /* AIF1TX_LRCLK_MODE */
-#define WM8915_AIF1TX_LRCLK_MODE_SHIFT 3 /* AIF1TX_LRCLK_MODE */
-#define WM8915_AIF1TX_LRCLK_MODE_WIDTH 1 /* AIF1TX_LRCLK_MODE */
-#define WM8915_AIF1TX_LRCLK_INV 0x0004 /* AIF1TX_LRCLK_INV */
-#define WM8915_AIF1TX_LRCLK_INV_MASK 0x0004 /* AIF1TX_LRCLK_INV */
-#define WM8915_AIF1TX_LRCLK_INV_SHIFT 2 /* AIF1TX_LRCLK_INV */
-#define WM8915_AIF1TX_LRCLK_INV_WIDTH 1 /* AIF1TX_LRCLK_INV */
-#define WM8915_AIF1TX_LRCLK_FRC 0x0002 /* AIF1TX_LRCLK_FRC */
-#define WM8915_AIF1TX_LRCLK_FRC_MASK 0x0002 /* AIF1TX_LRCLK_FRC */
-#define WM8915_AIF1TX_LRCLK_FRC_SHIFT 1 /* AIF1TX_LRCLK_FRC */
-#define WM8915_AIF1TX_LRCLK_FRC_WIDTH 1 /* AIF1TX_LRCLK_FRC */
-#define WM8915_AIF1TX_LRCLK_MSTR 0x0001 /* AIF1TX_LRCLK_MSTR */
-#define WM8915_AIF1TX_LRCLK_MSTR_MASK 0x0001 /* AIF1TX_LRCLK_MSTR */
-#define WM8915_AIF1TX_LRCLK_MSTR_SHIFT 0 /* AIF1TX_LRCLK_MSTR */
-#define WM8915_AIF1TX_LRCLK_MSTR_WIDTH 1 /* AIF1TX_LRCLK_MSTR */
-
-/*
- * R772 (0x304) - AIF1 RX LRCLK(1)
- */
-#define WM8915_AIF1RX_RATE_MASK 0x07FF /* AIF1RX_RATE - [10:0] */
-#define WM8915_AIF1RX_RATE_SHIFT 0 /* AIF1RX_RATE - [10:0] */
-#define WM8915_AIF1RX_RATE_WIDTH 11 /* AIF1RX_RATE - [10:0] */
-
-/*
- * R773 (0x305) - AIF1 RX LRCLK(2)
- */
-#define WM8915_AIF1RX_LRCLK_INV 0x0004 /* AIF1RX_LRCLK_INV */
-#define WM8915_AIF1RX_LRCLK_INV_MASK 0x0004 /* AIF1RX_LRCLK_INV */
-#define WM8915_AIF1RX_LRCLK_INV_SHIFT 2 /* AIF1RX_LRCLK_INV */
-#define WM8915_AIF1RX_LRCLK_INV_WIDTH 1 /* AIF1RX_LRCLK_INV */
-#define WM8915_AIF1RX_LRCLK_FRC 0x0002 /* AIF1RX_LRCLK_FRC */
-#define WM8915_AIF1RX_LRCLK_FRC_MASK 0x0002 /* AIF1RX_LRCLK_FRC */
-#define WM8915_AIF1RX_LRCLK_FRC_SHIFT 1 /* AIF1RX_LRCLK_FRC */
-#define WM8915_AIF1RX_LRCLK_FRC_WIDTH 1 /* AIF1RX_LRCLK_FRC */
-#define WM8915_AIF1RX_LRCLK_MSTR 0x0001 /* AIF1RX_LRCLK_MSTR */
-#define WM8915_AIF1RX_LRCLK_MSTR_MASK 0x0001 /* AIF1RX_LRCLK_MSTR */
-#define WM8915_AIF1RX_LRCLK_MSTR_SHIFT 0 /* AIF1RX_LRCLK_MSTR */
-#define WM8915_AIF1RX_LRCLK_MSTR_WIDTH 1 /* AIF1RX_LRCLK_MSTR */
-
-/*
- * R774 (0x306) - AIF1TX Data Configuration (1)
- */
-#define WM8915_AIF1TX_WL_MASK 0xFF00 /* AIF1TX_WL - [15:8] */
-#define WM8915_AIF1TX_WL_SHIFT 8 /* AIF1TX_WL - [15:8] */
-#define WM8915_AIF1TX_WL_WIDTH 8 /* AIF1TX_WL - [15:8] */
-#define WM8915_AIF1TX_SLOT_LEN_MASK 0x00FF /* AIF1TX_SLOT_LEN - [7:0] */
-#define WM8915_AIF1TX_SLOT_LEN_SHIFT 0 /* AIF1TX_SLOT_LEN - [7:0] */
-#define WM8915_AIF1TX_SLOT_LEN_WIDTH 8 /* AIF1TX_SLOT_LEN - [7:0] */
-
-/*
- * R775 (0x307) - AIF1TX Data Configuration (2)
- */
-#define WM8915_AIF1TX_DAT_TRI 0x0001 /* AIF1TX_DAT_TRI */
-#define WM8915_AIF1TX_DAT_TRI_MASK 0x0001 /* AIF1TX_DAT_TRI */
-#define WM8915_AIF1TX_DAT_TRI_SHIFT 0 /* AIF1TX_DAT_TRI */
-#define WM8915_AIF1TX_DAT_TRI_WIDTH 1 /* AIF1TX_DAT_TRI */
-
-/*
- * R776 (0x308) - AIF1RX Data Configuration
- */
-#define WM8915_AIF1RX_WL_MASK 0xFF00 /* AIF1RX_WL - [15:8] */
-#define WM8915_AIF1RX_WL_SHIFT 8 /* AIF1RX_WL - [15:8] */
-#define WM8915_AIF1RX_WL_WIDTH 8 /* AIF1RX_WL - [15:8] */
-#define WM8915_AIF1RX_SLOT_LEN_MASK 0x00FF /* AIF1RX_SLOT_LEN - [7:0] */
-#define WM8915_AIF1RX_SLOT_LEN_SHIFT 0 /* AIF1RX_SLOT_LEN - [7:0] */
-#define WM8915_AIF1RX_SLOT_LEN_WIDTH 8 /* AIF1RX_SLOT_LEN - [7:0] */
-
-/*
- * R777 (0x309) - AIF1TX Channel 0 Configuration
- */
-#define WM8915_AIF1TX_CHAN0_DAT_INV 0x8000 /* AIF1TX_CHAN0_DAT_INV */
-#define WM8915_AIF1TX_CHAN0_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN0_DAT_INV */
-#define WM8915_AIF1TX_CHAN0_DAT_INV_SHIFT 15 /* AIF1TX_CHAN0_DAT_INV */
-#define WM8915_AIF1TX_CHAN0_DAT_INV_WIDTH 1 /* AIF1TX_CHAN0_DAT_INV */
-#define WM8915_AIF1TX_CHAN0_SPACING_MASK 0x7E00 /* AIF1TX_CHAN0_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN0_SPACING_SHIFT 9 /* AIF1TX_CHAN0_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN0_SPACING_WIDTH 6 /* AIF1TX_CHAN0_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN0_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN0_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN0_SLOTS_SHIFT 6 /* AIF1TX_CHAN0_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN0_SLOTS_WIDTH 3 /* AIF1TX_CHAN0_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN0_START_SLOT_MASK 0x003F /* AIF1TX_CHAN0_START_SLOT - [5:0] */
-#define WM8915_AIF1TX_CHAN0_START_SLOT_SHIFT 0 /* AIF1TX_CHAN0_START_SLOT - [5:0] */
-#define WM8915_AIF1TX_CHAN0_START_SLOT_WIDTH 6 /* AIF1TX_CHAN0_START_SLOT - [5:0] */
-
-/*
- * R778 (0x30A) - AIF1TX Channel 1 Configuration
- */
-#define WM8915_AIF1TX_CHAN1_DAT_INV 0x8000 /* AIF1TX_CHAN1_DAT_INV */
-#define WM8915_AIF1TX_CHAN1_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN1_DAT_INV */
-#define WM8915_AIF1TX_CHAN1_DAT_INV_SHIFT 15 /* AIF1TX_CHAN1_DAT_INV */
-#define WM8915_AIF1TX_CHAN1_DAT_INV_WIDTH 1 /* AIF1TX_CHAN1_DAT_INV */
-#define WM8915_AIF1TX_CHAN1_SPACING_MASK 0x7E00 /* AIF1TX_CHAN1_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN1_SPACING_SHIFT 9 /* AIF1TX_CHAN1_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN1_SPACING_WIDTH 6 /* AIF1TX_CHAN1_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN1_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN1_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN1_SLOTS_SHIFT 6 /* AIF1TX_CHAN1_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN1_SLOTS_WIDTH 3 /* AIF1TX_CHAN1_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN1_START_SLOT_MASK 0x003F /* AIF1TX_CHAN1_START_SLOT - [5:0] */
-#define WM8915_AIF1TX_CHAN1_START_SLOT_SHIFT 0 /* AIF1TX_CHAN1_START_SLOT - [5:0] */
-#define WM8915_AIF1TX_CHAN1_START_SLOT_WIDTH 6 /* AIF1TX_CHAN1_START_SLOT - [5:0] */
-
-/*
- * R779 (0x30B) - AIF1TX Channel 2 Configuration
- */
-#define WM8915_AIF1TX_CHAN2_DAT_INV 0x8000 /* AIF1TX_CHAN2_DAT_INV */
-#define WM8915_AIF1TX_CHAN2_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN2_DAT_INV */
-#define WM8915_AIF1TX_CHAN2_DAT_INV_SHIFT 15 /* AIF1TX_CHAN2_DAT_INV */
-#define WM8915_AIF1TX_CHAN2_DAT_INV_WIDTH 1 /* AIF1TX_CHAN2_DAT_INV */
-#define WM8915_AIF1TX_CHAN2_SPACING_MASK 0x7E00 /* AIF1TX_CHAN2_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN2_SPACING_SHIFT 9 /* AIF1TX_CHAN2_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN2_SPACING_WIDTH 6 /* AIF1TX_CHAN2_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN2_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN2_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN2_SLOTS_SHIFT 6 /* AIF1TX_CHAN2_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN2_SLOTS_WIDTH 3 /* AIF1TX_CHAN2_SLOTS - [8:6] */
-#define WM8915_AIF1TX_CHAN2_START_SLOT_MASK 0x003F /* AIF1TX_CHAN2_START_SLOT - [5:0] */
-#define WM8915_AIF1TX_CHAN2_START_SLOT_SHIFT 0 /* AIF1TX_CHAN2_START_SLOT - [5:0] */
-#define WM8915_AIF1TX_CHAN2_START_SLOT_WIDTH 6 /* AIF1TX_CHAN2_START_SLOT - [5:0] */
-
-/*
- * R780 (0x30C) - AIF1TX Channel 3 Configuration
- */
-#define WM8915_AIF1TX_CHAN3_DAT_INV 0x8000 /* AIF1TX_CHAN3_DAT_INV */
-#define WM8915_AIF1TX_CHAN3_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN3_DAT_INV */
-#define WM8915_AIF1TX_CHAN3_DAT_INV_SHIFT 15 /* AIF1TX_CHAN3_DAT_INV */
-#define WM8915_AIF1TX_CHAN3_DAT_INV_WIDTH 1 /* AIF1TX_CHAN3_DAT_INV */
-#define WM8915_AIF1TX_CHAN3_SPACING_MASK 0x7E00 /* AIF1TX_CHAN3_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN3_SPACING_SHIFT 9 /* AIF1TX_CHAN3_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN3_SPACING_WIDTH 6 /* AIF1TX_CHAN3_SPACING - [14:9] */
-#define WM8915_AIF1TX_CHAN3_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN3_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN3_SLOTS_SHIFT 6 /* AIF1TX_CHAN3_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN3_SLOTS_WIDTH 3 /* AIF1TX_CHAN3_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN3_START_SLOT_MASK 0x003F /* AIF1TX_CHAN3_START_SLOT - [5:0] */ -#define WM8915_AIF1TX_CHAN3_START_SLOT_SHIFT 0 /* AIF1TX_CHAN3_START_SLOT - [5:0] */ -#define WM8915_AIF1TX_CHAN3_START_SLOT_WIDTH 6 /* AIF1TX_CHAN3_START_SLOT - [5:0] */ - -/* - * R781 (0x30D) - AIF1TX Channel 4 Configuration - */ -#define WM8915_AIF1TX_CHAN4_DAT_INV 0x8000 /* AIF1TX_CHAN4_DAT_INV */ -#define WM8915_AIF1TX_CHAN4_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN4_DAT_INV */ -#define WM8915_AIF1TX_CHAN4_DAT_INV_SHIFT 15 /* AIF1TX_CHAN4_DAT_INV */ -#define WM8915_AIF1TX_CHAN4_DAT_INV_WIDTH 1 /* AIF1TX_CHAN4_DAT_INV */ -#define WM8915_AIF1TX_CHAN4_SPACING_MASK 0x7E00 /* AIF1TX_CHAN4_SPACING - [14:9] */ -#define WM8915_AIF1TX_CHAN4_SPACING_SHIFT 9 /* AIF1TX_CHAN4_SPACING - [14:9] */ -#define WM8915_AIF1TX_CHAN4_SPACING_WIDTH 6 /* AIF1TX_CHAN4_SPACING - [14:9] */ -#define WM8915_AIF1TX_CHAN4_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN4_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN4_SLOTS_SHIFT 6 /* AIF1TX_CHAN4_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN4_SLOTS_WIDTH 3 /* AIF1TX_CHAN4_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN4_START_SLOT_MASK 0x003F /* AIF1TX_CHAN4_START_SLOT - [5:0] */ -#define WM8915_AIF1TX_CHAN4_START_SLOT_SHIFT 0 /* AIF1TX_CHAN4_START_SLOT - [5:0] */ -#define WM8915_AIF1TX_CHAN4_START_SLOT_WIDTH 6 /* AIF1TX_CHAN4_START_SLOT - [5:0] */ - -/* - * R782 (0x30E) - AIF1TX Channel 5 Configuration - */ -#define WM8915_AIF1TX_CHAN5_DAT_INV 0x8000 /* AIF1TX_CHAN5_DAT_INV */ -#define WM8915_AIF1TX_CHAN5_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN5_DAT_INV */ -#define WM8915_AIF1TX_CHAN5_DAT_INV_SHIFT 15 /* AIF1TX_CHAN5_DAT_INV */ -#define WM8915_AIF1TX_CHAN5_DAT_INV_WIDTH 1 /* AIF1TX_CHAN5_DAT_INV */ -#define WM8915_AIF1TX_CHAN5_SPACING_MASK 0x7E00 /* AIF1TX_CHAN5_SPACING - [14:9] */ -#define WM8915_AIF1TX_CHAN5_SPACING_SHIFT 9 /* AIF1TX_CHAN5_SPACING - [14:9] */ -#define WM8915_AIF1TX_CHAN5_SPACING_WIDTH 6 /* AIF1TX_CHAN5_SPACING - [14:9] */ -#define WM8915_AIF1TX_CHAN5_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN5_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN5_SLOTS_SHIFT 6 /* AIF1TX_CHAN5_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN5_SLOTS_WIDTH 3 /* AIF1TX_CHAN5_SLOTS - [8:6] */ -#define WM8915_AIF1TX_CHAN5_START_SLOT_MASK 0x003F /* AIF1TX_CHAN5_START_SLOT - [5:0] */ -#define WM8915_AIF1TX_CHAN5_START_SLOT_SHIFT 0 /* AIF1TX_CHAN5_START_SLOT - [5:0] */ -#define WM8915_AIF1TX_CHAN5_START_SLOT_WIDTH 6 /* AIF1TX_CHAN5_START_SLOT - [5:0] */ - -/* - * R783 (0x30F) - AIF1RX Channel 0 Configuration - */ -#define WM8915_AIF1RX_CHAN0_DAT_INV 0x8000 /* AIF1RX_CHAN0_DAT_INV */ -#define WM8915_AIF1RX_CHAN0_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN0_DAT_INV */ -#define WM8915_AIF1RX_CHAN0_DAT_INV_SHIFT 15 /* AIF1RX_CHAN0_DAT_INV */ -#define WM8915_AIF1RX_CHAN0_DAT_INV_WIDTH 1 /* AIF1RX_CHAN0_DAT_INV */ -#define WM8915_AIF1RX_CHAN0_SPACING_MASK 0x7E00 /* AIF1RX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN0_SPACING_SHIFT 9 /* AIF1RX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN0_SPACING_WIDTH 6 /* AIF1RX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN0_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN0_SLOTS_SHIFT 6 /* AIF1RX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN0_SLOTS_WIDTH 3 /* AIF1RX_CHAN0_SLOTS - [8:6] */ -#define 
WM8915_AIF1RX_CHAN0_START_SLOT_MASK 0x003F /* AIF1RX_CHAN0_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN0_START_SLOT_SHIFT 0 /* AIF1RX_CHAN0_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN0_START_SLOT_WIDTH 6 /* AIF1RX_CHAN0_START_SLOT - [5:0] */ - -/* - * R784 (0x310) - AIF1RX Channel 1 Configuration - */ -#define WM8915_AIF1RX_CHAN1_DAT_INV 0x8000 /* AIF1RX_CHAN1_DAT_INV */ -#define WM8915_AIF1RX_CHAN1_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN1_DAT_INV */ -#define WM8915_AIF1RX_CHAN1_DAT_INV_SHIFT 15 /* AIF1RX_CHAN1_DAT_INV */ -#define WM8915_AIF1RX_CHAN1_DAT_INV_WIDTH 1 /* AIF1RX_CHAN1_DAT_INV */ -#define WM8915_AIF1RX_CHAN1_SPACING_MASK 0x7E00 /* AIF1RX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN1_SPACING_SHIFT 9 /* AIF1RX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN1_SPACING_WIDTH 6 /* AIF1RX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN1_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN1_SLOTS_SHIFT 6 /* AIF1RX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN1_SLOTS_WIDTH 3 /* AIF1RX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN1_START_SLOT_MASK 0x003F /* AIF1RX_CHAN1_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN1_START_SLOT_SHIFT 0 /* AIF1RX_CHAN1_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN1_START_SLOT_WIDTH 6 /* AIF1RX_CHAN1_START_SLOT - [5:0] */ - -/* - * R785 (0x311) - AIF1RX Channel 2 Configuration - */ -#define WM8915_AIF1RX_CHAN2_DAT_INV 0x8000 /* AIF1RX_CHAN2_DAT_INV */ -#define WM8915_AIF1RX_CHAN2_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN2_DAT_INV */ -#define WM8915_AIF1RX_CHAN2_DAT_INV_SHIFT 15 /* AIF1RX_CHAN2_DAT_INV */ -#define WM8915_AIF1RX_CHAN2_DAT_INV_WIDTH 1 /* AIF1RX_CHAN2_DAT_INV */ -#define WM8915_AIF1RX_CHAN2_SPACING_MASK 0x7E00 /* AIF1RX_CHAN2_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN2_SPACING_SHIFT 9 /* AIF1RX_CHAN2_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN2_SPACING_WIDTH 6 /* AIF1RX_CHAN2_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN2_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN2_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN2_SLOTS_SHIFT 6 /* AIF1RX_CHAN2_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN2_SLOTS_WIDTH 3 /* AIF1RX_CHAN2_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN2_START_SLOT_MASK 0x003F /* AIF1RX_CHAN2_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN2_START_SLOT_SHIFT 0 /* AIF1RX_CHAN2_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN2_START_SLOT_WIDTH 6 /* AIF1RX_CHAN2_START_SLOT - [5:0] */ - -/* - * R786 (0x312) - AIF1RX Channel 3 Configuration - */ -#define WM8915_AIF1RX_CHAN3_DAT_INV 0x8000 /* AIF1RX_CHAN3_DAT_INV */ -#define WM8915_AIF1RX_CHAN3_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN3_DAT_INV */ -#define WM8915_AIF1RX_CHAN3_DAT_INV_SHIFT 15 /* AIF1RX_CHAN3_DAT_INV */ -#define WM8915_AIF1RX_CHAN3_DAT_INV_WIDTH 1 /* AIF1RX_CHAN3_DAT_INV */ -#define WM8915_AIF1RX_CHAN3_SPACING_MASK 0x7E00 /* AIF1RX_CHAN3_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN3_SPACING_SHIFT 9 /* AIF1RX_CHAN3_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN3_SPACING_WIDTH 6 /* AIF1RX_CHAN3_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN3_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN3_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN3_SLOTS_SHIFT 6 /* AIF1RX_CHAN3_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN3_SLOTS_WIDTH 3 /* AIF1RX_CHAN3_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN3_START_SLOT_MASK 0x003F /* AIF1RX_CHAN3_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN3_START_SLOT_SHIFT 0 /* AIF1RX_CHAN3_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN3_START_SLOT_WIDTH 6 /* AIF1RX_CHAN3_START_SLOT - [5:0] */ - -/* - * R787 
(0x313) - AIF1RX Channel 4 Configuration - */ -#define WM8915_AIF1RX_CHAN4_DAT_INV 0x8000 /* AIF1RX_CHAN4_DAT_INV */ -#define WM8915_AIF1RX_CHAN4_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN4_DAT_INV */ -#define WM8915_AIF1RX_CHAN4_DAT_INV_SHIFT 15 /* AIF1RX_CHAN4_DAT_INV */ -#define WM8915_AIF1RX_CHAN4_DAT_INV_WIDTH 1 /* AIF1RX_CHAN4_DAT_INV */ -#define WM8915_AIF1RX_CHAN4_SPACING_MASK 0x7E00 /* AIF1RX_CHAN4_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN4_SPACING_SHIFT 9 /* AIF1RX_CHAN4_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN4_SPACING_WIDTH 6 /* AIF1RX_CHAN4_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN4_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN4_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN4_SLOTS_SHIFT 6 /* AIF1RX_CHAN4_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN4_SLOTS_WIDTH 3 /* AIF1RX_CHAN4_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN4_START_SLOT_MASK 0x003F /* AIF1RX_CHAN4_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN4_START_SLOT_SHIFT 0 /* AIF1RX_CHAN4_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN4_START_SLOT_WIDTH 6 /* AIF1RX_CHAN4_START_SLOT - [5:0] */ - -/* - * R788 (0x314) - AIF1RX Channel 5 Configuration - */ -#define WM8915_AIF1RX_CHAN5_DAT_INV 0x8000 /* AIF1RX_CHAN5_DAT_INV */ -#define WM8915_AIF1RX_CHAN5_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN5_DAT_INV */ -#define WM8915_AIF1RX_CHAN5_DAT_INV_SHIFT 15 /* AIF1RX_CHAN5_DAT_INV */ -#define WM8915_AIF1RX_CHAN5_DAT_INV_WIDTH 1 /* AIF1RX_CHAN5_DAT_INV */ -#define WM8915_AIF1RX_CHAN5_SPACING_MASK 0x7E00 /* AIF1RX_CHAN5_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN5_SPACING_SHIFT 9 /* AIF1RX_CHAN5_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN5_SPACING_WIDTH 6 /* AIF1RX_CHAN5_SPACING - [14:9] */ -#define WM8915_AIF1RX_CHAN5_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN5_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN5_SLOTS_SHIFT 6 /* AIF1RX_CHAN5_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN5_SLOTS_WIDTH 3 /* AIF1RX_CHAN5_SLOTS - [8:6] */ -#define WM8915_AIF1RX_CHAN5_START_SLOT_MASK 0x003F /* AIF1RX_CHAN5_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN5_START_SLOT_SHIFT 0 /* AIF1RX_CHAN5_START_SLOT - [5:0] */ -#define WM8915_AIF1RX_CHAN5_START_SLOT_WIDTH 6 /* AIF1RX_CHAN5_START_SLOT - [5:0] */ - -/* - * R789 (0x315) - AIF1RX Mono Configuration - */ -#define WM8915_AIF1RX_CHAN4_MONO_MODE 0x0004 /* AIF1RX_CHAN4_MONO_MODE */ -#define WM8915_AIF1RX_CHAN4_MONO_MODE_MASK 0x0004 /* AIF1RX_CHAN4_MONO_MODE */ -#define WM8915_AIF1RX_CHAN4_MONO_MODE_SHIFT 2 /* AIF1RX_CHAN4_MONO_MODE */ -#define WM8915_AIF1RX_CHAN4_MONO_MODE_WIDTH 1 /* AIF1RX_CHAN4_MONO_MODE */ -#define WM8915_AIF1RX_CHAN2_MONO_MODE 0x0002 /* AIF1RX_CHAN2_MONO_MODE */ -#define WM8915_AIF1RX_CHAN2_MONO_MODE_MASK 0x0002 /* AIF1RX_CHAN2_MONO_MODE */ -#define WM8915_AIF1RX_CHAN2_MONO_MODE_SHIFT 1 /* AIF1RX_CHAN2_MONO_MODE */ -#define WM8915_AIF1RX_CHAN2_MONO_MODE_WIDTH 1 /* AIF1RX_CHAN2_MONO_MODE */ -#define WM8915_AIF1RX_CHAN0_MONO_MODE 0x0001 /* AIF1RX_CHAN0_MONO_MODE */ -#define WM8915_AIF1RX_CHAN0_MONO_MODE_MASK 0x0001 /* AIF1RX_CHAN0_MONO_MODE */ -#define WM8915_AIF1RX_CHAN0_MONO_MODE_SHIFT 0 /* AIF1RX_CHAN0_MONO_MODE */ -#define WM8915_AIF1RX_CHAN0_MONO_MODE_WIDTH 1 /* AIF1RX_CHAN0_MONO_MODE */ - -/* - * R794 (0x31A) - AIF1TX Test - */ -#define WM8915_AIF1TX45_DITHER_ENA 0x0004 /* AIF1TX45_DITHER_ENA */ -#define WM8915_AIF1TX45_DITHER_ENA_MASK 0x0004 /* AIF1TX45_DITHER_ENA */ -#define WM8915_AIF1TX45_DITHER_ENA_SHIFT 2 /* AIF1TX45_DITHER_ENA */ -#define WM8915_AIF1TX45_DITHER_ENA_WIDTH 1 /* AIF1TX45_DITHER_ENA */ -#define WM8915_AIF1TX23_DITHER_ENA 0x0002 /* AIF1TX23_DITHER_ENA */ -#define 
WM8915_AIF1TX23_DITHER_ENA_MASK 0x0002 /* AIF1TX23_DITHER_ENA */ -#define WM8915_AIF1TX23_DITHER_ENA_SHIFT 1 /* AIF1TX23_DITHER_ENA */ -#define WM8915_AIF1TX23_DITHER_ENA_WIDTH 1 /* AIF1TX23_DITHER_ENA */ -#define WM8915_AIF1TX01_DITHER_ENA 0x0001 /* AIF1TX01_DITHER_ENA */ -#define WM8915_AIF1TX01_DITHER_ENA_MASK 0x0001 /* AIF1TX01_DITHER_ENA */ -#define WM8915_AIF1TX01_DITHER_ENA_SHIFT 0 /* AIF1TX01_DITHER_ENA */ -#define WM8915_AIF1TX01_DITHER_ENA_WIDTH 1 /* AIF1TX01_DITHER_ENA */ - -/* - * R800 (0x320) - AIF2 Control - */ -#define WM8915_AIF2_TRI 0x0004 /* AIF2_TRI */ -#define WM8915_AIF2_TRI_MASK 0x0004 /* AIF2_TRI */ -#define WM8915_AIF2_TRI_SHIFT 2 /* AIF2_TRI */ -#define WM8915_AIF2_TRI_WIDTH 1 /* AIF2_TRI */ -#define WM8915_AIF2_FMT_MASK 0x0003 /* AIF2_FMT - [1:0] */ -#define WM8915_AIF2_FMT_SHIFT 0 /* AIF2_FMT - [1:0] */ -#define WM8915_AIF2_FMT_WIDTH 2 /* AIF2_FMT - [1:0] */ - -/* - * R801 (0x321) - AIF2 BCLK - */ -#define WM8915_AIF2_BCLK_INV 0x0400 /* AIF2_BCLK_INV */ -#define WM8915_AIF2_BCLK_INV_MASK 0x0400 /* AIF2_BCLK_INV */ -#define WM8915_AIF2_BCLK_INV_SHIFT 10 /* AIF2_BCLK_INV */ -#define WM8915_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */ -#define WM8915_AIF2_BCLK_FRC 0x0200 /* AIF2_BCLK_FRC */ -#define WM8915_AIF2_BCLK_FRC_MASK 0x0200 /* AIF2_BCLK_FRC */ -#define WM8915_AIF2_BCLK_FRC_SHIFT 9 /* AIF2_BCLK_FRC */ -#define WM8915_AIF2_BCLK_FRC_WIDTH 1 /* AIF2_BCLK_FRC */ -#define WM8915_AIF2_BCLK_MSTR 0x0100 /* AIF2_BCLK_MSTR */ -#define WM8915_AIF2_BCLK_MSTR_MASK 0x0100 /* AIF2_BCLK_MSTR */ -#define WM8915_AIF2_BCLK_MSTR_SHIFT 8 /* AIF2_BCLK_MSTR */ -#define WM8915_AIF2_BCLK_MSTR_WIDTH 1 /* AIF2_BCLK_MSTR */ -#define WM8915_AIF2_BCLK_DIV_MASK 0x000F /* AIF2_BCLK_DIV - [3:0] */ -#define WM8915_AIF2_BCLK_DIV_SHIFT 0 /* AIF2_BCLK_DIV - [3:0] */ -#define WM8915_AIF2_BCLK_DIV_WIDTH 4 /* AIF2_BCLK_DIV - [3:0] */ - -/* - * R802 (0x322) - AIF2 TX LRCLK(1) - */ -#define WM8915_AIF2TX_RATE_MASK 0x07FF /* AIF2TX_RATE - [10:0] */ -#define WM8915_AIF2TX_RATE_SHIFT 0 /* AIF2TX_RATE - [10:0] */ -#define WM8915_AIF2TX_RATE_WIDTH 11 /* AIF2TX_RATE - [10:0] */ - -/* - * R803 (0x323) - AIF2 TX LRCLK(2) - */ -#define WM8915_AIF2TX_LRCLK_MODE 0x0008 /* AIF2TX_LRCLK_MODE */ -#define WM8915_AIF2TX_LRCLK_MODE_MASK 0x0008 /* AIF2TX_LRCLK_MODE */ -#define WM8915_AIF2TX_LRCLK_MODE_SHIFT 3 /* AIF2TX_LRCLK_MODE */ -#define WM8915_AIF2TX_LRCLK_MODE_WIDTH 1 /* AIF2TX_LRCLK_MODE */ -#define WM8915_AIF2TX_LRCLK_INV 0x0004 /* AIF2TX_LRCLK_INV */ -#define WM8915_AIF2TX_LRCLK_INV_MASK 0x0004 /* AIF2TX_LRCLK_INV */ -#define WM8915_AIF2TX_LRCLK_INV_SHIFT 2 /* AIF2TX_LRCLK_INV */ -#define WM8915_AIF2TX_LRCLK_INV_WIDTH 1 /* AIF2TX_LRCLK_INV */ -#define WM8915_AIF2TX_LRCLK_FRC 0x0002 /* AIF2TX_LRCLK_FRC */ -#define WM8915_AIF2TX_LRCLK_FRC_MASK 0x0002 /* AIF2TX_LRCLK_FRC */ -#define WM8915_AIF2TX_LRCLK_FRC_SHIFT 1 /* AIF2TX_LRCLK_FRC */ -#define WM8915_AIF2TX_LRCLK_FRC_WIDTH 1 /* AIF2TX_LRCLK_FRC */ -#define WM8915_AIF2TX_LRCLK_MSTR 0x0001 /* AIF2TX_LRCLK_MSTR */ -#define WM8915_AIF2TX_LRCLK_MSTR_MASK 0x0001 /* AIF2TX_LRCLK_MSTR */ -#define WM8915_AIF2TX_LRCLK_MSTR_SHIFT 0 /* AIF2TX_LRCLK_MSTR */ -#define WM8915_AIF2TX_LRCLK_MSTR_WIDTH 1 /* AIF2TX_LRCLK_MSTR */ - -/* - * R804 (0x324) - AIF2 RX LRCLK(1) - */ -#define WM8915_AIF2RX_RATE_MASK 0x07FF /* AIF2RX_RATE - [10:0] */ -#define WM8915_AIF2RX_RATE_SHIFT 0 /* AIF2RX_RATE - [10:0] */ -#define WM8915_AIF2RX_RATE_WIDTH 11 /* AIF2RX_RATE - [10:0] */ - -/* - * R805 (0x325) - AIF2 RX LRCLK(2) - */ -#define WM8915_AIF2RX_LRCLK_INV 0x0004 /* AIF2RX_LRCLK_INV */ -#define 
WM8915_AIF2RX_LRCLK_INV_MASK 0x0004 /* AIF2RX_LRCLK_INV */ -#define WM8915_AIF2RX_LRCLK_INV_SHIFT 2 /* AIF2RX_LRCLK_INV */ -#define WM8915_AIF2RX_LRCLK_INV_WIDTH 1 /* AIF2RX_LRCLK_INV */ -#define WM8915_AIF2RX_LRCLK_FRC 0x0002 /* AIF2RX_LRCLK_FRC */ -#define WM8915_AIF2RX_LRCLK_FRC_MASK 0x0002 /* AIF2RX_LRCLK_FRC */ -#define WM8915_AIF2RX_LRCLK_FRC_SHIFT 1 /* AIF2RX_LRCLK_FRC */ -#define WM8915_AIF2RX_LRCLK_FRC_WIDTH 1 /* AIF2RX_LRCLK_FRC */ -#define WM8915_AIF2RX_LRCLK_MSTR 0x0001 /* AIF2RX_LRCLK_MSTR */ -#define WM8915_AIF2RX_LRCLK_MSTR_MASK 0x0001 /* AIF2RX_LRCLK_MSTR */ -#define WM8915_AIF2RX_LRCLK_MSTR_SHIFT 0 /* AIF2RX_LRCLK_MSTR */ -#define WM8915_AIF2RX_LRCLK_MSTR_WIDTH 1 /* AIF2RX_LRCLK_MSTR */ - -/* - * R806 (0x326) - AIF2TX Data Configuration (1) - */ -#define WM8915_AIF2TX_WL_MASK 0xFF00 /* AIF2TX_WL - [15:8] */ -#define WM8915_AIF2TX_WL_SHIFT 8 /* AIF2TX_WL - [15:8] */ -#define WM8915_AIF2TX_WL_WIDTH 8 /* AIF2TX_WL - [15:8] */ -#define WM8915_AIF2TX_SLOT_LEN_MASK 0x00FF /* AIF2TX_SLOT_LEN - [7:0] */ -#define WM8915_AIF2TX_SLOT_LEN_SHIFT 0 /* AIF2TX_SLOT_LEN - [7:0] */ -#define WM8915_AIF2TX_SLOT_LEN_WIDTH 8 /* AIF2TX_SLOT_LEN - [7:0] */ - -/* - * R807 (0x327) - AIF2TX Data Configuration (2) - */ -#define WM8915_AIF2TX_DAT_TRI 0x0001 /* AIF2TX_DAT_TRI */ -#define WM8915_AIF2TX_DAT_TRI_MASK 0x0001 /* AIF2TX_DAT_TRI */ -#define WM8915_AIF2TX_DAT_TRI_SHIFT 0 /* AIF2TX_DAT_TRI */ -#define WM8915_AIF2TX_DAT_TRI_WIDTH 1 /* AIF2TX_DAT_TRI */ - -/* - * R808 (0x328) - AIF2RX Data Configuration - */ -#define WM8915_AIF2RX_WL_MASK 0xFF00 /* AIF2RX_WL - [15:8] */ -#define WM8915_AIF2RX_WL_SHIFT 8 /* AIF2RX_WL - [15:8] */ -#define WM8915_AIF2RX_WL_WIDTH 8 /* AIF2RX_WL - [15:8] */ -#define WM8915_AIF2RX_SLOT_LEN_MASK 0x00FF /* AIF2RX_SLOT_LEN - [7:0] */ -#define WM8915_AIF2RX_SLOT_LEN_SHIFT 0 /* AIF2RX_SLOT_LEN - [7:0] */ -#define WM8915_AIF2RX_SLOT_LEN_WIDTH 8 /* AIF2RX_SLOT_LEN - [7:0] */ - -/* - * R809 (0x329) - AIF2TX Channel 0 Configuration - */ -#define WM8915_AIF2TX_CHAN0_DAT_INV 0x8000 /* AIF2TX_CHAN0_DAT_INV */ -#define WM8915_AIF2TX_CHAN0_DAT_INV_MASK 0x8000 /* AIF2TX_CHAN0_DAT_INV */ -#define WM8915_AIF2TX_CHAN0_DAT_INV_SHIFT 15 /* AIF2TX_CHAN0_DAT_INV */ -#define WM8915_AIF2TX_CHAN0_DAT_INV_WIDTH 1 /* AIF2TX_CHAN0_DAT_INV */ -#define WM8915_AIF2TX_CHAN0_SPACING_MASK 0x7E00 /* AIF2TX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF2TX_CHAN0_SPACING_SHIFT 9 /* AIF2TX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF2TX_CHAN0_SPACING_WIDTH 6 /* AIF2TX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF2TX_CHAN0_SLOTS_MASK 0x01C0 /* AIF2TX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF2TX_CHAN0_SLOTS_SHIFT 6 /* AIF2TX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF2TX_CHAN0_SLOTS_WIDTH 3 /* AIF2TX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF2TX_CHAN0_START_SLOT_MASK 0x003F /* AIF2TX_CHAN0_START_SLOT - [5:0] */ -#define WM8915_AIF2TX_CHAN0_START_SLOT_SHIFT 0 /* AIF2TX_CHAN0_START_SLOT - [5:0] */ -#define WM8915_AIF2TX_CHAN0_START_SLOT_WIDTH 6 /* AIF2TX_CHAN0_START_SLOT - [5:0] */ - -/* - * R810 (0x32A) - AIF2TX Channel 1 Configuration - */ -#define WM8915_AIF2TX_CHAN1_DAT_INV 0x8000 /* AIF2TX_CHAN1_DAT_INV */ -#define WM8915_AIF2TX_CHAN1_DAT_INV_MASK 0x8000 /* AIF2TX_CHAN1_DAT_INV */ -#define WM8915_AIF2TX_CHAN1_DAT_INV_SHIFT 15 /* AIF2TX_CHAN1_DAT_INV */ -#define WM8915_AIF2TX_CHAN1_DAT_INV_WIDTH 1 /* AIF2TX_CHAN1_DAT_INV */ -#define WM8915_AIF2TX_CHAN1_SPACING_MASK 0x7E00 /* AIF2TX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF2TX_CHAN1_SPACING_SHIFT 9 /* AIF2TX_CHAN1_SPACING - [14:9] */ -#define 
WM8915_AIF2TX_CHAN1_SPACING_WIDTH 6 /* AIF2TX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF2TX_CHAN1_SLOTS_MASK 0x01C0 /* AIF2TX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF2TX_CHAN1_SLOTS_SHIFT 6 /* AIF2TX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF2TX_CHAN1_SLOTS_WIDTH 3 /* AIF2TX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF2TX_CHAN1_START_SLOT_MASK 0x003F /* AIF2TX_CHAN1_START_SLOT - [5:0] */ -#define WM8915_AIF2TX_CHAN1_START_SLOT_SHIFT 0 /* AIF2TX_CHAN1_START_SLOT - [5:0] */ -#define WM8915_AIF2TX_CHAN1_START_SLOT_WIDTH 6 /* AIF2TX_CHAN1_START_SLOT - [5:0] */ - -/* - * R811 (0x32B) - AIF2RX Channel 0 Configuration - */ -#define WM8915_AIF2RX_CHAN0_DAT_INV 0x8000 /* AIF2RX_CHAN0_DAT_INV */ -#define WM8915_AIF2RX_CHAN0_DAT_INV_MASK 0x8000 /* AIF2RX_CHAN0_DAT_INV */ -#define WM8915_AIF2RX_CHAN0_DAT_INV_SHIFT 15 /* AIF2RX_CHAN0_DAT_INV */ -#define WM8915_AIF2RX_CHAN0_DAT_INV_WIDTH 1 /* AIF2RX_CHAN0_DAT_INV */ -#define WM8915_AIF2RX_CHAN0_SPACING_MASK 0x7E00 /* AIF2RX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF2RX_CHAN0_SPACING_SHIFT 9 /* AIF2RX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF2RX_CHAN0_SPACING_WIDTH 6 /* AIF2RX_CHAN0_SPACING - [14:9] */ -#define WM8915_AIF2RX_CHAN0_SLOTS_MASK 0x01C0 /* AIF2RX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF2RX_CHAN0_SLOTS_SHIFT 6 /* AIF2RX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF2RX_CHAN0_SLOTS_WIDTH 3 /* AIF2RX_CHAN0_SLOTS - [8:6] */ -#define WM8915_AIF2RX_CHAN0_START_SLOT_MASK 0x003F /* AIF2RX_CHAN0_START_SLOT - [5:0] */ -#define WM8915_AIF2RX_CHAN0_START_SLOT_SHIFT 0 /* AIF2RX_CHAN0_START_SLOT - [5:0] */ -#define WM8915_AIF2RX_CHAN0_START_SLOT_WIDTH 6 /* AIF2RX_CHAN0_START_SLOT - [5:0] */ - -/* - * R812 (0x32C) - AIF2RX Channel 1 Configuration - */ -#define WM8915_AIF2RX_CHAN1_DAT_INV 0x8000 /* AIF2RX_CHAN1_DAT_INV */ -#define WM8915_AIF2RX_CHAN1_DAT_INV_MASK 0x8000 /* AIF2RX_CHAN1_DAT_INV */ -#define WM8915_AIF2RX_CHAN1_DAT_INV_SHIFT 15 /* AIF2RX_CHAN1_DAT_INV */ -#define WM8915_AIF2RX_CHAN1_DAT_INV_WIDTH 1 /* AIF2RX_CHAN1_DAT_INV */ -#define WM8915_AIF2RX_CHAN1_SPACING_MASK 0x7E00 /* AIF2RX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF2RX_CHAN1_SPACING_SHIFT 9 /* AIF2RX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF2RX_CHAN1_SPACING_WIDTH 6 /* AIF2RX_CHAN1_SPACING - [14:9] */ -#define WM8915_AIF2RX_CHAN1_SLOTS_MASK 0x01C0 /* AIF2RX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF2RX_CHAN1_SLOTS_SHIFT 6 /* AIF2RX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF2RX_CHAN1_SLOTS_WIDTH 3 /* AIF2RX_CHAN1_SLOTS - [8:6] */ -#define WM8915_AIF2RX_CHAN1_START_SLOT_MASK 0x003F /* AIF2RX_CHAN1_START_SLOT - [5:0] */ -#define WM8915_AIF2RX_CHAN1_START_SLOT_SHIFT 0 /* AIF2RX_CHAN1_START_SLOT - [5:0] */ -#define WM8915_AIF2RX_CHAN1_START_SLOT_WIDTH 6 /* AIF2RX_CHAN1_START_SLOT - [5:0] */ - -/* - * R813 (0x32D) - AIF2RX Mono Configuration - */ -#define WM8915_AIF2RX_CHAN0_MONO_MODE 0x0001 /* AIF2RX_CHAN0_MONO_MODE */ -#define WM8915_AIF2RX_CHAN0_MONO_MODE_MASK 0x0001 /* AIF2RX_CHAN0_MONO_MODE */ -#define WM8915_AIF2RX_CHAN0_MONO_MODE_SHIFT 0 /* AIF2RX_CHAN0_MONO_MODE */ -#define WM8915_AIF2RX_CHAN0_MONO_MODE_WIDTH 1 /* AIF2RX_CHAN0_MONO_MODE */ - -/* - * R815 (0x32F) - AIF2TX Test - */ -#define WM8915_AIF2TX_DITHER_ENA 0x0001 /* AIF2TX_DITHER_ENA */ -#define WM8915_AIF2TX_DITHER_ENA_MASK 0x0001 /* AIF2TX_DITHER_ENA */ -#define WM8915_AIF2TX_DITHER_ENA_SHIFT 0 /* AIF2TX_DITHER_ENA */ -#define WM8915_AIF2TX_DITHER_ENA_WIDTH 1 /* AIF2TX_DITHER_ENA */ - -/* - * R1024 (0x400) - DSP1 TX Left Volume - */ -#define WM8915_DSP1TX_VU 0x0100 /* DSP1TX_VU */ -#define 
WM8915_DSP1TX_VU_MASK 0x0100 /* DSP1TX_VU */ -#define WM8915_DSP1TX_VU_SHIFT 8 /* DSP1TX_VU */ -#define WM8915_DSP1TX_VU_WIDTH 1 /* DSP1TX_VU */ -#define WM8915_DSP1TXL_VOL_MASK 0x00FF /* DSP1TXL_VOL - [7:0] */ -#define WM8915_DSP1TXL_VOL_SHIFT 0 /* DSP1TXL_VOL - [7:0] */ -#define WM8915_DSP1TXL_VOL_WIDTH 8 /* DSP1TXL_VOL - [7:0] */ - -/* - * R1025 (0x401) - DSP1 TX Right Volume - */ -#define WM8915_DSP1TX_VU 0x0100 /* DSP1TX_VU */ -#define WM8915_DSP1TX_VU_MASK 0x0100 /* DSP1TX_VU */ -#define WM8915_DSP1TX_VU_SHIFT 8 /* DSP1TX_VU */ -#define WM8915_DSP1TX_VU_WIDTH 1 /* DSP1TX_VU */ -#define WM8915_DSP1TXR_VOL_MASK 0x00FF /* DSP1TXR_VOL - [7:0] */ -#define WM8915_DSP1TXR_VOL_SHIFT 0 /* DSP1TXR_VOL - [7:0] */ -#define WM8915_DSP1TXR_VOL_WIDTH 8 /* DSP1TXR_VOL - [7:0] */ - -/* - * R1026 (0x402) - DSP1 RX Left Volume - */ -#define WM8915_DSP1RX_VU 0x0100 /* DSP1RX_VU */ -#define WM8915_DSP1RX_VU_MASK 0x0100 /* DSP1RX_VU */ -#define WM8915_DSP1RX_VU_SHIFT 8 /* DSP1RX_VU */ -#define WM8915_DSP1RX_VU_WIDTH 1 /* DSP1RX_VU */ -#define WM8915_DSP1RXL_VOL_MASK 0x00FF /* DSP1RXL_VOL - [7:0] */ -#define WM8915_DSP1RXL_VOL_SHIFT 0 /* DSP1RXL_VOL - [7:0] */ -#define WM8915_DSP1RXL_VOL_WIDTH 8 /* DSP1RXL_VOL - [7:0] */ - -/* - * R1027 (0x403) - DSP1 RX Right Volume - */ -#define WM8915_DSP1RX_VU 0x0100 /* DSP1RX_VU */ -#define WM8915_DSP1RX_VU_MASK 0x0100 /* DSP1RX_VU */ -#define WM8915_DSP1RX_VU_SHIFT 8 /* DSP1RX_VU */ -#define WM8915_DSP1RX_VU_WIDTH 1 /* DSP1RX_VU */ -#define WM8915_DSP1RXR_VOL_MASK 0x00FF /* DSP1RXR_VOL - [7:0] */ -#define WM8915_DSP1RXR_VOL_SHIFT 0 /* DSP1RXR_VOL - [7:0] */ -#define WM8915_DSP1RXR_VOL_WIDTH 8 /* DSP1RXR_VOL - [7:0] */ - -/* - * R1040 (0x410) - DSP1 TX Filters - */ -#define WM8915_DSP1TX_NF 0x2000 /* DSP1TX_NF */ -#define WM8915_DSP1TX_NF_MASK 0x2000 /* DSP1TX_NF */ -#define WM8915_DSP1TX_NF_SHIFT 13 /* DSP1TX_NF */ -#define WM8915_DSP1TX_NF_WIDTH 1 /* DSP1TX_NF */ -#define WM8915_DSP1TXL_HPF 0x1000 /* DSP1TXL_HPF */ -#define WM8915_DSP1TXL_HPF_MASK 0x1000 /* DSP1TXL_HPF */ -#define WM8915_DSP1TXL_HPF_SHIFT 12 /* DSP1TXL_HPF */ -#define WM8915_DSP1TXL_HPF_WIDTH 1 /* DSP1TXL_HPF */ -#define WM8915_DSP1TXR_HPF 0x0800 /* DSP1TXR_HPF */ -#define WM8915_DSP1TXR_HPF_MASK 0x0800 /* DSP1TXR_HPF */ -#define WM8915_DSP1TXR_HPF_SHIFT 11 /* DSP1TXR_HPF */ -#define WM8915_DSP1TXR_HPF_WIDTH 1 /* DSP1TXR_HPF */ -#define WM8915_DSP1TX_HPF_MODE_MASK 0x0018 /* DSP1TX_HPF_MODE - [4:3] */ -#define WM8915_DSP1TX_HPF_MODE_SHIFT 3 /* DSP1TX_HPF_MODE - [4:3] */ -#define WM8915_DSP1TX_HPF_MODE_WIDTH 2 /* DSP1TX_HPF_MODE - [4:3] */ -#define WM8915_DSP1TX_HPF_CUT_MASK 0x0007 /* DSP1TX_HPF_CUT - [2:0] */ -#define WM8915_DSP1TX_HPF_CUT_SHIFT 0 /* DSP1TX_HPF_CUT - [2:0] */ -#define WM8915_DSP1TX_HPF_CUT_WIDTH 3 /* DSP1TX_HPF_CUT - [2:0] */ - -/* - * R1056 (0x420) - DSP1 RX Filters (1) - */ -#define WM8915_DSP1RX_MUTE 0x0200 /* DSP1RX_MUTE */ -#define WM8915_DSP1RX_MUTE_MASK 0x0200 /* DSP1RX_MUTE */ -#define WM8915_DSP1RX_MUTE_SHIFT 9 /* DSP1RX_MUTE */ -#define WM8915_DSP1RX_MUTE_WIDTH 1 /* DSP1RX_MUTE */ -#define WM8915_DSP1RX_MONO 0x0080 /* DSP1RX_MONO */ -#define WM8915_DSP1RX_MONO_MASK 0x0080 /* DSP1RX_MONO */ -#define WM8915_DSP1RX_MONO_SHIFT 7 /* DSP1RX_MONO */ -#define WM8915_DSP1RX_MONO_WIDTH 1 /* DSP1RX_MONO */ -#define WM8915_DSP1RX_MUTERATE 0x0020 /* DSP1RX_MUTERATE */ -#define WM8915_DSP1RX_MUTERATE_MASK 0x0020 /* DSP1RX_MUTERATE */ -#define WM8915_DSP1RX_MUTERATE_SHIFT 5 /* DSP1RX_MUTERATE */ -#define WM8915_DSP1RX_MUTERATE_WIDTH 1 /* DSP1RX_MUTERATE */ -#define 
WM8915_DSP1RX_UNMUTE_RAMP 0x0010 /* DSP1RX_UNMUTE_RAMP */ -#define WM8915_DSP1RX_UNMUTE_RAMP_MASK 0x0010 /* DSP1RX_UNMUTE_RAMP */ -#define WM8915_DSP1RX_UNMUTE_RAMP_SHIFT 4 /* DSP1RX_UNMUTE_RAMP */ -#define WM8915_DSP1RX_UNMUTE_RAMP_WIDTH 1 /* DSP1RX_UNMUTE_RAMP */ - -/* - * R1057 (0x421) - DSP1 RX Filters (2) - */ -#define WM8915_DSP1RX_3D_GAIN_MASK 0x3E00 /* DSP1RX_3D_GAIN - [13:9] */ -#define WM8915_DSP1RX_3D_GAIN_SHIFT 9 /* DSP1RX_3D_GAIN - [13:9] */ -#define WM8915_DSP1RX_3D_GAIN_WIDTH 5 /* DSP1RX_3D_GAIN - [13:9] */ -#define WM8915_DSP1RX_3D_ENA 0x0100 /* DSP1RX_3D_ENA */ -#define WM8915_DSP1RX_3D_ENA_MASK 0x0100 /* DSP1RX_3D_ENA */ -#define WM8915_DSP1RX_3D_ENA_SHIFT 8 /* DSP1RX_3D_ENA */ -#define WM8915_DSP1RX_3D_ENA_WIDTH 1 /* DSP1RX_3D_ENA */ - -/* - * R1088 (0x440) - DSP1 DRC (1) - */ -#define WM8915_DSP1DRC_SIG_DET_RMS_MASK 0xF800 /* DSP1DRC_SIG_DET_RMS - [15:11] */ -#define WM8915_DSP1DRC_SIG_DET_RMS_SHIFT 11 /* DSP1DRC_SIG_DET_RMS - [15:11] */ -#define WM8915_DSP1DRC_SIG_DET_RMS_WIDTH 5 /* DSP1DRC_SIG_DET_RMS - [15:11] */ -#define WM8915_DSP1DRC_SIG_DET_PK_MASK 0x0600 /* DSP1DRC_SIG_DET_PK - [10:9] */ -#define WM8915_DSP1DRC_SIG_DET_PK_SHIFT 9 /* DSP1DRC_SIG_DET_PK - [10:9] */ -#define WM8915_DSP1DRC_SIG_DET_PK_WIDTH 2 /* DSP1DRC_SIG_DET_PK - [10:9] */ -#define WM8915_DSP1DRC_NG_ENA 0x0100 /* DSP1DRC_NG_ENA */ -#define WM8915_DSP1DRC_NG_ENA_MASK 0x0100 /* DSP1DRC_NG_ENA */ -#define WM8915_DSP1DRC_NG_ENA_SHIFT 8 /* DSP1DRC_NG_ENA */ -#define WM8915_DSP1DRC_NG_ENA_WIDTH 1 /* DSP1DRC_NG_ENA */ -#define WM8915_DSP1DRC_SIG_DET_MODE 0x0080 /* DSP1DRC_SIG_DET_MODE */ -#define WM8915_DSP1DRC_SIG_DET_MODE_MASK 0x0080 /* DSP1DRC_SIG_DET_MODE */ -#define WM8915_DSP1DRC_SIG_DET_MODE_SHIFT 7 /* DSP1DRC_SIG_DET_MODE */ -#define WM8915_DSP1DRC_SIG_DET_MODE_WIDTH 1 /* DSP1DRC_SIG_DET_MODE */ -#define WM8915_DSP1DRC_SIG_DET 0x0040 /* DSP1DRC_SIG_DET */ -#define WM8915_DSP1DRC_SIG_DET_MASK 0x0040 /* DSP1DRC_SIG_DET */ -#define WM8915_DSP1DRC_SIG_DET_SHIFT 6 /* DSP1DRC_SIG_DET */ -#define WM8915_DSP1DRC_SIG_DET_WIDTH 1 /* DSP1DRC_SIG_DET */ -#define WM8915_DSP1DRC_KNEE2_OP_ENA 0x0020 /* DSP1DRC_KNEE2_OP_ENA */ -#define WM8915_DSP1DRC_KNEE2_OP_ENA_MASK 0x0020 /* DSP1DRC_KNEE2_OP_ENA */ -#define WM8915_DSP1DRC_KNEE2_OP_ENA_SHIFT 5 /* DSP1DRC_KNEE2_OP_ENA */ -#define WM8915_DSP1DRC_KNEE2_OP_ENA_WIDTH 1 /* DSP1DRC_KNEE2_OP_ENA */ -#define WM8915_DSP1DRC_QR 0x0010 /* DSP1DRC_QR */ -#define WM8915_DSP1DRC_QR_MASK 0x0010 /* DSP1DRC_QR */ -#define WM8915_DSP1DRC_QR_SHIFT 4 /* DSP1DRC_QR */ -#define WM8915_DSP1DRC_QR_WIDTH 1 /* DSP1DRC_QR */ -#define WM8915_DSP1DRC_ANTICLIP 0x0008 /* DSP1DRC_ANTICLIP */ -#define WM8915_DSP1DRC_ANTICLIP_MASK 0x0008 /* DSP1DRC_ANTICLIP */ -#define WM8915_DSP1DRC_ANTICLIP_SHIFT 3 /* DSP1DRC_ANTICLIP */ -#define WM8915_DSP1DRC_ANTICLIP_WIDTH 1 /* DSP1DRC_ANTICLIP */ -#define WM8915_DSP1RX_DRC_ENA 0x0004 /* DSP1RX_DRC_ENA */ -#define WM8915_DSP1RX_DRC_ENA_MASK 0x0004 /* DSP1RX_DRC_ENA */ -#define WM8915_DSP1RX_DRC_ENA_SHIFT 2 /* DSP1RX_DRC_ENA */ -#define WM8915_DSP1RX_DRC_ENA_WIDTH 1 /* DSP1RX_DRC_ENA */ -#define WM8915_DSP1TXL_DRC_ENA 0x0002 /* DSP1TXL_DRC_ENA */ -#define WM8915_DSP1TXL_DRC_ENA_MASK 0x0002 /* DSP1TXL_DRC_ENA */ -#define WM8915_DSP1TXL_DRC_ENA_SHIFT 1 /* DSP1TXL_DRC_ENA */ -#define WM8915_DSP1TXL_DRC_ENA_WIDTH 1 /* DSP1TXL_DRC_ENA */ -#define WM8915_DSP1TXR_DRC_ENA 0x0001 /* DSP1TXR_DRC_ENA */ -#define WM8915_DSP1TXR_DRC_ENA_MASK 0x0001 /* DSP1TXR_DRC_ENA */ -#define WM8915_DSP1TXR_DRC_ENA_SHIFT 0 /* DSP1TXR_DRC_ENA */ -#define WM8915_DSP1TXR_DRC_ENA_WIDTH 
1 /* DSP1TXR_DRC_ENA */ - -/* - * R1089 (0x441) - DSP1 DRC (2) - */ -#define WM8915_DSP1DRC_ATK_MASK 0x1E00 /* DSP1DRC_ATK - [12:9] */ -#define WM8915_DSP1DRC_ATK_SHIFT 9 /* DSP1DRC_ATK - [12:9] */ -#define WM8915_DSP1DRC_ATK_WIDTH 4 /* DSP1DRC_ATK - [12:9] */ -#define WM8915_DSP1DRC_DCY_MASK 0x01E0 /* DSP1DRC_DCY - [8:5] */ -#define WM8915_DSP1DRC_DCY_SHIFT 5 /* DSP1DRC_DCY - [8:5] */ -#define WM8915_DSP1DRC_DCY_WIDTH 4 /* DSP1DRC_DCY - [8:5] */ -#define WM8915_DSP1DRC_MINGAIN_MASK 0x001C /* DSP1DRC_MINGAIN - [4:2] */ -#define WM8915_DSP1DRC_MINGAIN_SHIFT 2 /* DSP1DRC_MINGAIN - [4:2] */ -#define WM8915_DSP1DRC_MINGAIN_WIDTH 3 /* DSP1DRC_MINGAIN - [4:2] */ -#define WM8915_DSP1DRC_MAXGAIN_MASK 0x0003 /* DSP1DRC_MAXGAIN - [1:0] */ -#define WM8915_DSP1DRC_MAXGAIN_SHIFT 0 /* DSP1DRC_MAXGAIN - [1:0] */ -#define WM8915_DSP1DRC_MAXGAIN_WIDTH 2 /* DSP1DRC_MAXGAIN - [1:0] */ - -/* - * R1090 (0x442) - DSP1 DRC (3) - */ -#define WM8915_DSP1DRC_NG_MINGAIN_MASK 0xF000 /* DSP1DRC_NG_MINGAIN - [15:12] */ -#define WM8915_DSP1DRC_NG_MINGAIN_SHIFT 12 /* DSP1DRC_NG_MINGAIN - [15:12] */ -#define WM8915_DSP1DRC_NG_MINGAIN_WIDTH 4 /* DSP1DRC_NG_MINGAIN - [15:12] */ -#define WM8915_DSP1DRC_NG_EXP_MASK 0x0C00 /* DSP1DRC_NG_EXP - [11:10] */ -#define WM8915_DSP1DRC_NG_EXP_SHIFT 10 /* DSP1DRC_NG_EXP - [11:10] */ -#define WM8915_DSP1DRC_NG_EXP_WIDTH 2 /* DSP1DRC_NG_EXP - [11:10] */ -#define WM8915_DSP1DRC_QR_THR_MASK 0x0300 /* DSP1DRC_QR_THR - [9:8] */ -#define WM8915_DSP1DRC_QR_THR_SHIFT 8 /* DSP1DRC_QR_THR - [9:8] */ -#define WM8915_DSP1DRC_QR_THR_WIDTH 2 /* DSP1DRC_QR_THR - [9:8] */ -#define WM8915_DSP1DRC_QR_DCY_MASK 0x00C0 /* DSP1DRC_QR_DCY - [7:6] */ -#define WM8915_DSP1DRC_QR_DCY_SHIFT 6 /* DSP1DRC_QR_DCY - [7:6] */ -#define WM8915_DSP1DRC_QR_DCY_WIDTH 2 /* DSP1DRC_QR_DCY - [7:6] */ -#define WM8915_DSP1DRC_HI_COMP_MASK 0x0038 /* DSP1DRC_HI_COMP - [5:3] */ -#define WM8915_DSP1DRC_HI_COMP_SHIFT 3 /* DSP1DRC_HI_COMP - [5:3] */ -#define WM8915_DSP1DRC_HI_COMP_WIDTH 3 /* DSP1DRC_HI_COMP - [5:3] */ -#define WM8915_DSP1DRC_LO_COMP_MASK 0x0007 /* DSP1DRC_LO_COMP - [2:0] */ -#define WM8915_DSP1DRC_LO_COMP_SHIFT 0 /* DSP1DRC_LO_COMP - [2:0] */ -#define WM8915_DSP1DRC_LO_COMP_WIDTH 3 /* DSP1DRC_LO_COMP - [2:0] */ - -/* - * R1091 (0x443) - DSP1 DRC (4) - */ -#define WM8915_DSP1DRC_KNEE_IP_MASK 0x07E0 /* DSP1DRC_KNEE_IP - [10:5] */ -#define WM8915_DSP1DRC_KNEE_IP_SHIFT 5 /* DSP1DRC_KNEE_IP - [10:5] */ -#define WM8915_DSP1DRC_KNEE_IP_WIDTH 6 /* DSP1DRC_KNEE_IP - [10:5] */ -#define WM8915_DSP1DRC_KNEE_OP_MASK 0x001F /* DSP1DRC_KNEE_OP - [4:0] */ -#define WM8915_DSP1DRC_KNEE_OP_SHIFT 0 /* DSP1DRC_KNEE_OP - [4:0] */ -#define WM8915_DSP1DRC_KNEE_OP_WIDTH 5 /* DSP1DRC_KNEE_OP - [4:0] */ - -/* - * R1092 (0x444) - DSP1 DRC (5) - */ -#define WM8915_DSP1DRC_KNEE2_IP_MASK 0x03E0 /* DSP1DRC_KNEE2_IP - [9:5] */ -#define WM8915_DSP1DRC_KNEE2_IP_SHIFT 5 /* DSP1DRC_KNEE2_IP - [9:5] */ -#define WM8915_DSP1DRC_KNEE2_IP_WIDTH 5 /* DSP1DRC_KNEE2_IP - [9:5] */ -#define WM8915_DSP1DRC_KNEE2_OP_MASK 0x001F /* DSP1DRC_KNEE2_OP - [4:0] */ -#define WM8915_DSP1DRC_KNEE2_OP_SHIFT 0 /* DSP1DRC_KNEE2_OP - [4:0] */ -#define WM8915_DSP1DRC_KNEE2_OP_WIDTH 5 /* DSP1DRC_KNEE2_OP - [4:0] */ - -/* - * R1152 (0x480) - DSP1 RX EQ Gains (1) - */ -#define WM8915_DSP1RX_EQ_B1_GAIN_MASK 0xF800 /* DSP1RX_EQ_B1_GAIN - [15:11] */ -#define WM8915_DSP1RX_EQ_B1_GAIN_SHIFT 11 /* DSP1RX_EQ_B1_GAIN - [15:11] */ -#define WM8915_DSP1RX_EQ_B1_GAIN_WIDTH 5 /* DSP1RX_EQ_B1_GAIN - [15:11] */ -#define WM8915_DSP1RX_EQ_B2_GAIN_MASK 0x07C0 /* DSP1RX_EQ_B2_GAIN - [10:6] */ -#define 
-#define WM8915_DSP1RX_EQ_B2_GAIN_SHIFT 6 /* DSP1RX_EQ_B2_GAIN - [10:6] */
-#define WM8915_DSP1RX_EQ_B2_GAIN_WIDTH 5 /* DSP1RX_EQ_B2_GAIN - [10:6] */
-#define WM8915_DSP1RX_EQ_B3_GAIN_MASK 0x003E /* DSP1RX_EQ_B3_GAIN - [5:1] */
-#define WM8915_DSP1RX_EQ_B3_GAIN_SHIFT 1 /* DSP1RX_EQ_B3_GAIN - [5:1] */
-#define WM8915_DSP1RX_EQ_B3_GAIN_WIDTH 5 /* DSP1RX_EQ_B3_GAIN - [5:1] */
-#define WM8915_DSP1RX_EQ_ENA 0x0001 /* DSP1RX_EQ_ENA */
-#define WM8915_DSP1RX_EQ_ENA_MASK 0x0001 /* DSP1RX_EQ_ENA */
-#define WM8915_DSP1RX_EQ_ENA_SHIFT 0 /* DSP1RX_EQ_ENA */
-#define WM8915_DSP1RX_EQ_ENA_WIDTH 1 /* DSP1RX_EQ_ENA */
-
-/*
- * R1153 (0x481) - DSP1 RX EQ Gains (2)
- */
-#define WM8915_DSP1RX_EQ_B4_GAIN_MASK 0xF800 /* DSP1RX_EQ_B4_GAIN - [15:11] */
-#define WM8915_DSP1RX_EQ_B4_GAIN_SHIFT 11 /* DSP1RX_EQ_B4_GAIN - [15:11] */
-#define WM8915_DSP1RX_EQ_B4_GAIN_WIDTH 5 /* DSP1RX_EQ_B4_GAIN - [15:11] */
-#define WM8915_DSP1RX_EQ_B5_GAIN_MASK 0x07C0 /* DSP1RX_EQ_B5_GAIN - [10:6] */
-#define WM8915_DSP1RX_EQ_B5_GAIN_SHIFT 6 /* DSP1RX_EQ_B5_GAIN - [10:6] */
-#define WM8915_DSP1RX_EQ_B5_GAIN_WIDTH 5 /* DSP1RX_EQ_B5_GAIN - [10:6] */
-
-/*
- * R1154 (0x482) - DSP1 RX EQ Band 1 A
- */
-#define WM8915_DSP1RX_EQ_B1_A_MASK 0xFFFF /* DSP1RX_EQ_B1_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B1_A_SHIFT 0 /* DSP1RX_EQ_B1_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B1_A_WIDTH 16 /* DSP1RX_EQ_B1_A - [15:0] */
-
-/*
- * R1155 (0x483) - DSP1 RX EQ Band 1 B
- */
-#define WM8915_DSP1RX_EQ_B1_B_MASK 0xFFFF /* DSP1RX_EQ_B1_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B1_B_SHIFT 0 /* DSP1RX_EQ_B1_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B1_B_WIDTH 16 /* DSP1RX_EQ_B1_B - [15:0] */
-
-/*
- * R1156 (0x484) - DSP1 RX EQ Band 1 PG
- */
-#define WM8915_DSP1RX_EQ_B1_PG_MASK 0xFFFF /* DSP1RX_EQ_B1_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B1_PG_SHIFT 0 /* DSP1RX_EQ_B1_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B1_PG_WIDTH 16 /* DSP1RX_EQ_B1_PG - [15:0] */
-
-/*
- * R1157 (0x485) - DSP1 RX EQ Band 2 A
- */
-#define WM8915_DSP1RX_EQ_B2_A_MASK 0xFFFF /* DSP1RX_EQ_B2_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_A_SHIFT 0 /* DSP1RX_EQ_B2_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_A_WIDTH 16 /* DSP1RX_EQ_B2_A - [15:0] */
-
-/*
- * R1158 (0x486) - DSP1 RX EQ Band 2 B
- */
-#define WM8915_DSP1RX_EQ_B2_B_MASK 0xFFFF /* DSP1RX_EQ_B2_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_B_SHIFT 0 /* DSP1RX_EQ_B2_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_B_WIDTH 16 /* DSP1RX_EQ_B2_B - [15:0] */
-
-/*
- * R1159 (0x487) - DSP1 RX EQ Band 2 C
- */
-#define WM8915_DSP1RX_EQ_B2_C_MASK 0xFFFF /* DSP1RX_EQ_B2_C - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_C_SHIFT 0 /* DSP1RX_EQ_B2_C - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_C_WIDTH 16 /* DSP1RX_EQ_B2_C - [15:0] */
-
-/*
- * R1160 (0x488) - DSP1 RX EQ Band 2 PG
- */
-#define WM8915_DSP1RX_EQ_B2_PG_MASK 0xFFFF /* DSP1RX_EQ_B2_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_PG_SHIFT 0 /* DSP1RX_EQ_B2_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B2_PG_WIDTH 16 /* DSP1RX_EQ_B2_PG - [15:0] */
-
-/*
- * R1161 (0x489) - DSP1 RX EQ Band 3 A
- */
-#define WM8915_DSP1RX_EQ_B3_A_MASK 0xFFFF /* DSP1RX_EQ_B3_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_A_SHIFT 0 /* DSP1RX_EQ_B3_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_A_WIDTH 16 /* DSP1RX_EQ_B3_A - [15:0] */
-
-/*
- * R1162 (0x48A) - DSP1 RX EQ Band 3 B
- */
-#define WM8915_DSP1RX_EQ_B3_B_MASK 0xFFFF /* DSP1RX_EQ_B3_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_B_SHIFT 0 /* DSP1RX_EQ_B3_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_B_WIDTH 16 /* DSP1RX_EQ_B3_B - [15:0] */
-
-/*
- * R1163 (0x48B) - DSP1 RX EQ Band 3 C
- */
-#define WM8915_DSP1RX_EQ_B3_C_MASK 0xFFFF /* DSP1RX_EQ_B3_C - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_C_SHIFT 0 /* DSP1RX_EQ_B3_C - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_C_WIDTH 16 /* DSP1RX_EQ_B3_C - [15:0] */
-
-/*
- * R1164 (0x48C) - DSP1 RX EQ Band 3 PG
- */
-#define WM8915_DSP1RX_EQ_B3_PG_MASK 0xFFFF /* DSP1RX_EQ_B3_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_PG_SHIFT 0 /* DSP1RX_EQ_B3_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B3_PG_WIDTH 16 /* DSP1RX_EQ_B3_PG - [15:0] */
-
-/*
- * R1165 (0x48D) - DSP1 RX EQ Band 4 A
- */
-#define WM8915_DSP1RX_EQ_B4_A_MASK 0xFFFF /* DSP1RX_EQ_B4_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_A_SHIFT 0 /* DSP1RX_EQ_B4_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_A_WIDTH 16 /* DSP1RX_EQ_B4_A - [15:0] */
-
-/*
- * R1166 (0x48E) - DSP1 RX EQ Band 4 B
- */
-#define WM8915_DSP1RX_EQ_B4_B_MASK 0xFFFF /* DSP1RX_EQ_B4_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_B_SHIFT 0 /* DSP1RX_EQ_B4_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_B_WIDTH 16 /* DSP1RX_EQ_B4_B - [15:0] */
-
-/*
- * R1167 (0x48F) - DSP1 RX EQ Band 4 C
- */
-#define WM8915_DSP1RX_EQ_B4_C_MASK 0xFFFF /* DSP1RX_EQ_B4_C - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_C_SHIFT 0 /* DSP1RX_EQ_B4_C - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_C_WIDTH 16 /* DSP1RX_EQ_B4_C - [15:0] */
-
-/*
- * R1168 (0x490) - DSP1 RX EQ Band 4 PG
- */
-#define WM8915_DSP1RX_EQ_B4_PG_MASK 0xFFFF /* DSP1RX_EQ_B4_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_PG_SHIFT 0 /* DSP1RX_EQ_B4_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B4_PG_WIDTH 16 /* DSP1RX_EQ_B4_PG - [15:0] */
-
-/*
- * R1169 (0x491) - DSP1 RX EQ Band 5 A
- */
-#define WM8915_DSP1RX_EQ_B5_A_MASK 0xFFFF /* DSP1RX_EQ_B5_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B5_A_SHIFT 0 /* DSP1RX_EQ_B5_A - [15:0] */
-#define WM8915_DSP1RX_EQ_B5_A_WIDTH 16 /* DSP1RX_EQ_B5_A - [15:0] */
-
-/*
- * R1170 (0x492) - DSP1 RX EQ Band 5 B
- */
-#define WM8915_DSP1RX_EQ_B5_B_MASK 0xFFFF /* DSP1RX_EQ_B5_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B5_B_SHIFT 0 /* DSP1RX_EQ_B5_B - [15:0] */
-#define WM8915_DSP1RX_EQ_B5_B_WIDTH 16 /* DSP1RX_EQ_B5_B - [15:0] */
-
-/*
- * R1171 (0x493) - DSP1 RX EQ Band 5 PG
- */
-#define WM8915_DSP1RX_EQ_B5_PG_MASK 0xFFFF /* DSP1RX_EQ_B5_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B5_PG_SHIFT 0 /* DSP1RX_EQ_B5_PG - [15:0] */
-#define WM8915_DSP1RX_EQ_B5_PG_WIDTH 16 /* DSP1RX_EQ_B5_PG - [15:0] */
-
-/*
- * R1280 (0x500) - DSP2 TX Left Volume
- */
-#define WM8915_DSP2TX_VU 0x0100 /* DSP2TX_VU */
-#define WM8915_DSP2TX_VU_MASK 0x0100 /* DSP2TX_VU */
-#define WM8915_DSP2TX_VU_SHIFT 8 /* DSP2TX_VU */
-#define WM8915_DSP2TX_VU_WIDTH 1 /* DSP2TX_VU */
-#define WM8915_DSP2TXL_VOL_MASK 0x00FF /* DSP2TXL_VOL - [7:0] */
-#define WM8915_DSP2TXL_VOL_SHIFT 0 /* DSP2TXL_VOL - [7:0] */
-#define WM8915_DSP2TXL_VOL_WIDTH 8 /* DSP2TXL_VOL - [7:0] */
-
-/*
- * R1281 (0x501) - DSP2 TX Right Volume
- */
-#define WM8915_DSP2TX_VU 0x0100 /* DSP2TX_VU */
-#define WM8915_DSP2TX_VU_MASK 0x0100 /* DSP2TX_VU */
-#define WM8915_DSP2TX_VU_SHIFT 8 /* DSP2TX_VU */
-#define WM8915_DSP2TX_VU_WIDTH 1 /* DSP2TX_VU */
-#define WM8915_DSP2TXR_VOL_MASK 0x00FF /* DSP2TXR_VOL - [7:0] */
-#define WM8915_DSP2TXR_VOL_SHIFT 0 /* DSP2TXR_VOL - [7:0] */
-#define WM8915_DSP2TXR_VOL_WIDTH 8 /* DSP2TXR_VOL - [7:0] */
-
-/*
- * R1282 (0x502) - DSP2 RX Left Volume
- */
-#define WM8915_DSP2RX_VU 0x0100 /* DSP2RX_VU */
-#define WM8915_DSP2RX_VU_MASK 0x0100 /* DSP2RX_VU */
-#define WM8915_DSP2RX_VU_SHIFT 8 /* DSP2RX_VU */
-#define WM8915_DSP2RX_VU_WIDTH 1 /* DSP2RX_VU */
-#define WM8915_DSP2RXL_VOL_MASK 0x00FF /* DSP2RXL_VOL - [7:0] */
-#define WM8915_DSP2RXL_VOL_SHIFT 0 /* DSP2RXL_VOL - [7:0] */
-#define WM8915_DSP2RXL_VOL_WIDTH 8 /* DSP2RXL_VOL - [7:0] */
-
-/*
- * R1283 (0x503) - DSP2 RX Right Volume
- */
-#define WM8915_DSP2RX_VU 0x0100 /* DSP2RX_VU */
-#define WM8915_DSP2RX_VU_MASK 0x0100 /* DSP2RX_VU */
-#define WM8915_DSP2RX_VU_SHIFT 8 /* DSP2RX_VU */
-#define WM8915_DSP2RX_VU_WIDTH 1 /* DSP2RX_VU */
-#define WM8915_DSP2RXR_VOL_MASK 0x00FF /* DSP2RXR_VOL - [7:0] */
-#define WM8915_DSP2RXR_VOL_SHIFT 0 /* DSP2RXR_VOL - [7:0] */
-#define WM8915_DSP2RXR_VOL_WIDTH 8 /* DSP2RXR_VOL - [7:0] */
-
-/*
- * R1296 (0x510) - DSP2 TX Filters
- */
-#define WM8915_DSP2TX_NF 0x2000 /* DSP2TX_NF */
-#define WM8915_DSP2TX_NF_MASK 0x2000 /* DSP2TX_NF */
-#define WM8915_DSP2TX_NF_SHIFT 13 /* DSP2TX_NF */
-#define WM8915_DSP2TX_NF_WIDTH 1 /* DSP2TX_NF */
-#define WM8915_DSP2TXL_HPF 0x1000 /* DSP2TXL_HPF */
-#define WM8915_DSP2TXL_HPF_MASK 0x1000 /* DSP2TXL_HPF */
-#define WM8915_DSP2TXL_HPF_SHIFT 12 /* DSP2TXL_HPF */
-#define WM8915_DSP2TXL_HPF_WIDTH 1 /* DSP2TXL_HPF */
-#define WM8915_DSP2TXR_HPF 0x0800 /* DSP2TXR_HPF */
-#define WM8915_DSP2TXR_HPF_MASK 0x0800 /* DSP2TXR_HPF */
-#define WM8915_DSP2TXR_HPF_SHIFT 11 /* DSP2TXR_HPF */
-#define WM8915_DSP2TXR_HPF_WIDTH 1 /* DSP2TXR_HPF */
-#define WM8915_DSP2TX_HPF_MODE_MASK 0x0018 /* DSP2TX_HPF_MODE - [4:3] */
-#define WM8915_DSP2TX_HPF_MODE_SHIFT 3 /* DSP2TX_HPF_MODE - [4:3] */
-#define WM8915_DSP2TX_HPF_MODE_WIDTH 2 /* DSP2TX_HPF_MODE - [4:3] */
-#define WM8915_DSP2TX_HPF_CUT_MASK 0x0007 /* DSP2TX_HPF_CUT - [2:0] */
-#define WM8915_DSP2TX_HPF_CUT_SHIFT 0 /* DSP2TX_HPF_CUT - [2:0] */
-#define WM8915_DSP2TX_HPF_CUT_WIDTH 3 /* DSP2TX_HPF_CUT - [2:0] */
-
-/*
- * R1312 (0x520) - DSP2 RX Filters (1)
- */
-#define WM8915_DSP2RX_MUTE 0x0200 /* DSP2RX_MUTE */
-#define WM8915_DSP2RX_MUTE_MASK 0x0200 /* DSP2RX_MUTE */
-#define WM8915_DSP2RX_MUTE_SHIFT 9 /* DSP2RX_MUTE */
-#define WM8915_DSP2RX_MUTE_WIDTH 1 /* DSP2RX_MUTE */
-#define WM8915_DSP2RX_MONO 0x0080 /* DSP2RX_MONO */
-#define WM8915_DSP2RX_MONO_MASK 0x0080 /* DSP2RX_MONO */
-#define WM8915_DSP2RX_MONO_SHIFT 7 /* DSP2RX_MONO */
-#define WM8915_DSP2RX_MONO_WIDTH 1 /* DSP2RX_MONO */
-#define WM8915_DSP2RX_MUTERATE 0x0020 /* DSP2RX_MUTERATE */
-#define WM8915_DSP2RX_MUTERATE_MASK 0x0020 /* DSP2RX_MUTERATE */
-#define WM8915_DSP2RX_MUTERATE_SHIFT 5 /* DSP2RX_MUTERATE */
-#define WM8915_DSP2RX_MUTERATE_WIDTH 1 /* DSP2RX_MUTERATE */
-#define WM8915_DSP2RX_UNMUTE_RAMP 0x0010 /* DSP2RX_UNMUTE_RAMP */
-#define WM8915_DSP2RX_UNMUTE_RAMP_MASK 0x0010 /* DSP2RX_UNMUTE_RAMP */
-#define WM8915_DSP2RX_UNMUTE_RAMP_SHIFT 4 /* DSP2RX_UNMUTE_RAMP */
-#define WM8915_DSP2RX_UNMUTE_RAMP_WIDTH 1 /* DSP2RX_UNMUTE_RAMP */
-
-/*
- * R1313 (0x521) - DSP2 RX Filters (2)
- */
-#define WM8915_DSP2RX_3D_GAIN_MASK 0x3E00 /* DSP2RX_3D_GAIN - [13:9] */
-#define WM8915_DSP2RX_3D_GAIN_SHIFT 9 /* DSP2RX_3D_GAIN - [13:9] */
-#define WM8915_DSP2RX_3D_GAIN_WIDTH 5 /* DSP2RX_3D_GAIN - [13:9] */
-#define WM8915_DSP2RX_3D_ENA 0x0100 /* DSP2RX_3D_ENA */
-#define WM8915_DSP2RX_3D_ENA_MASK 0x0100 /* DSP2RX_3D_ENA */
-#define WM8915_DSP2RX_3D_ENA_SHIFT 8 /* DSP2RX_3D_ENA */
-#define WM8915_DSP2RX_3D_ENA_WIDTH 1 /* DSP2RX_3D_ENA */
-
-/*
- * R1344 (0x540) - DSP2 DRC (1)
- */
-#define WM8915_DSP2DRC_SIG_DET_RMS_MASK 0xF800 /* DSP2DRC_SIG_DET_RMS - [15:11] */
-#define WM8915_DSP2DRC_SIG_DET_RMS_SHIFT 11 /* DSP2DRC_SIG_DET_RMS - [15:11] */
-#define WM8915_DSP2DRC_SIG_DET_RMS_WIDTH 5 /* DSP2DRC_SIG_DET_RMS - [15:11] */
-#define WM8915_DSP2DRC_SIG_DET_PK_MASK 0x0600 /* DSP2DRC_SIG_DET_PK - [10:9] */
-#define WM8915_DSP2DRC_SIG_DET_PK_SHIFT 9 /* DSP2DRC_SIG_DET_PK - [10:9] */
-#define WM8915_DSP2DRC_SIG_DET_PK_WIDTH 2 /* DSP2DRC_SIG_DET_PK - [10:9] */
-#define WM8915_DSP2DRC_NG_ENA 0x0100 /* DSP2DRC_NG_ENA */
-#define WM8915_DSP2DRC_NG_ENA_MASK 0x0100 /* DSP2DRC_NG_ENA */
-#define WM8915_DSP2DRC_NG_ENA_SHIFT 8 /* DSP2DRC_NG_ENA */
-#define WM8915_DSP2DRC_NG_ENA_WIDTH 1 /* DSP2DRC_NG_ENA */
-#define WM8915_DSP2DRC_SIG_DET_MODE 0x0080 /* DSP2DRC_SIG_DET_MODE */
-#define WM8915_DSP2DRC_SIG_DET_MODE_MASK 0x0080 /* DSP2DRC_SIG_DET_MODE */
-#define WM8915_DSP2DRC_SIG_DET_MODE_SHIFT 7 /* DSP2DRC_SIG_DET_MODE */
-#define WM8915_DSP2DRC_SIG_DET_MODE_WIDTH 1 /* DSP2DRC_SIG_DET_MODE */
-#define WM8915_DSP2DRC_SIG_DET 0x0040 /* DSP2DRC_SIG_DET */
-#define WM8915_DSP2DRC_SIG_DET_MASK 0x0040 /* DSP2DRC_SIG_DET */
-#define WM8915_DSP2DRC_SIG_DET_SHIFT 6 /* DSP2DRC_SIG_DET */
-#define WM8915_DSP2DRC_SIG_DET_WIDTH 1 /* DSP2DRC_SIG_DET */
-#define WM8915_DSP2DRC_KNEE2_OP_ENA 0x0020 /* DSP2DRC_KNEE2_OP_ENA */
-#define WM8915_DSP2DRC_KNEE2_OP_ENA_MASK 0x0020 /* DSP2DRC_KNEE2_OP_ENA */
-#define WM8915_DSP2DRC_KNEE2_OP_ENA_SHIFT 5 /* DSP2DRC_KNEE2_OP_ENA */
-#define WM8915_DSP2DRC_KNEE2_OP_ENA_WIDTH 1 /* DSP2DRC_KNEE2_OP_ENA */
-#define WM8915_DSP2DRC_QR 0x0010 /* DSP2DRC_QR */
-#define WM8915_DSP2DRC_QR_MASK 0x0010 /* DSP2DRC_QR */
-#define WM8915_DSP2DRC_QR_SHIFT 4 /* DSP2DRC_QR */
-#define WM8915_DSP2DRC_QR_WIDTH 1 /* DSP2DRC_QR */
-#define WM8915_DSP2DRC_ANTICLIP 0x0008 /* DSP2DRC_ANTICLIP */
-#define WM8915_DSP2DRC_ANTICLIP_MASK 0x0008 /* DSP2DRC_ANTICLIP */
-#define WM8915_DSP2DRC_ANTICLIP_SHIFT 3 /* DSP2DRC_ANTICLIP */
-#define WM8915_DSP2DRC_ANTICLIP_WIDTH 1 /* DSP2DRC_ANTICLIP */
-#define WM8915_DSP2RX_DRC_ENA 0x0004 /* DSP2RX_DRC_ENA */
-#define WM8915_DSP2RX_DRC_ENA_MASK 0x0004 /* DSP2RX_DRC_ENA */
-#define WM8915_DSP2RX_DRC_ENA_SHIFT 2 /* DSP2RX_DRC_ENA */
-#define WM8915_DSP2RX_DRC_ENA_WIDTH 1 /* DSP2RX_DRC_ENA */
-#define WM8915_DSP2TXL_DRC_ENA 0x0002 /* DSP2TXL_DRC_ENA */
-#define WM8915_DSP2TXL_DRC_ENA_MASK 0x0002 /* DSP2TXL_DRC_ENA */
-#define WM8915_DSP2TXL_DRC_ENA_SHIFT 1 /* DSP2TXL_DRC_ENA */
-#define WM8915_DSP2TXL_DRC_ENA_WIDTH 1 /* DSP2TXL_DRC_ENA */
-#define WM8915_DSP2TXR_DRC_ENA 0x0001 /* DSP2TXR_DRC_ENA */
-#define WM8915_DSP2TXR_DRC_ENA_MASK 0x0001 /* DSP2TXR_DRC_ENA */
-#define WM8915_DSP2TXR_DRC_ENA_SHIFT 0 /* DSP2TXR_DRC_ENA */
-#define WM8915_DSP2TXR_DRC_ENA_WIDTH 1 /* DSP2TXR_DRC_ENA */
-
-/*
- * R1345 (0x541) - DSP2 DRC (2)
- */
-#define WM8915_DSP2DRC_ATK_MASK 0x1E00 /* DSP2DRC_ATK - [12:9] */
-#define WM8915_DSP2DRC_ATK_SHIFT 9 /* DSP2DRC_ATK - [12:9] */
-#define WM8915_DSP2DRC_ATK_WIDTH 4 /* DSP2DRC_ATK - [12:9] */
-#define WM8915_DSP2DRC_DCY_MASK 0x01E0 /* DSP2DRC_DCY - [8:5] */
-#define WM8915_DSP2DRC_DCY_SHIFT 5 /* DSP2DRC_DCY - [8:5] */
-#define WM8915_DSP2DRC_DCY_WIDTH 4 /* DSP2DRC_DCY - [8:5] */
-#define WM8915_DSP2DRC_MINGAIN_MASK 0x001C /* DSP2DRC_MINGAIN - [4:2] */
-#define WM8915_DSP2DRC_MINGAIN_SHIFT 2 /* DSP2DRC_MINGAIN - [4:2] */
-#define WM8915_DSP2DRC_MINGAIN_WIDTH 3 /* DSP2DRC_MINGAIN - [4:2] */
-#define WM8915_DSP2DRC_MAXGAIN_MASK 0x0003 /* DSP2DRC_MAXGAIN - [1:0] */
-#define WM8915_DSP2DRC_MAXGAIN_SHIFT 0 /* DSP2DRC_MAXGAIN - [1:0] */
-#define WM8915_DSP2DRC_MAXGAIN_WIDTH 2 /* DSP2DRC_MAXGAIN - [1:0] */
-
-/*
- * R1346 (0x542) - DSP2 DRC (3)
- */
-#define WM8915_DSP2DRC_NG_MINGAIN_MASK 0xF000 /* DSP2DRC_NG_MINGAIN - [15:12] */
-#define WM8915_DSP2DRC_NG_MINGAIN_SHIFT 12 /* DSP2DRC_NG_MINGAIN - [15:12] */
-#define WM8915_DSP2DRC_NG_MINGAIN_WIDTH 4 /* DSP2DRC_NG_MINGAIN - [15:12] */
-#define WM8915_DSP2DRC_NG_EXP_MASK 0x0C00 /* DSP2DRC_NG_EXP - [11:10] */
-#define WM8915_DSP2DRC_NG_EXP_SHIFT 10 /* DSP2DRC_NG_EXP - [11:10] */
-#define WM8915_DSP2DRC_NG_EXP_WIDTH 2 /* DSP2DRC_NG_EXP - [11:10] */
-#define WM8915_DSP2DRC_QR_THR_MASK 0x0300 /* DSP2DRC_QR_THR - [9:8] */
-#define WM8915_DSP2DRC_QR_THR_SHIFT 8 /* DSP2DRC_QR_THR - [9:8] */
-#define WM8915_DSP2DRC_QR_THR_WIDTH 2 /* DSP2DRC_QR_THR - [9:8] */
-#define WM8915_DSP2DRC_QR_DCY_MASK 0x00C0 /* DSP2DRC_QR_DCY - [7:6] */
-#define WM8915_DSP2DRC_QR_DCY_SHIFT 6 /* DSP2DRC_QR_DCY - [7:6] */
-#define WM8915_DSP2DRC_QR_DCY_WIDTH 2 /* DSP2DRC_QR_DCY - [7:6] */
-#define WM8915_DSP2DRC_HI_COMP_MASK 0x0038 /* DSP2DRC_HI_COMP - [5:3] */
-#define WM8915_DSP2DRC_HI_COMP_SHIFT 3 /* DSP2DRC_HI_COMP - [5:3] */
-#define WM8915_DSP2DRC_HI_COMP_WIDTH 3 /* DSP2DRC_HI_COMP - [5:3] */
-#define WM8915_DSP2DRC_LO_COMP_MASK 0x0007 /* DSP2DRC_LO_COMP - [2:0] */
-#define WM8915_DSP2DRC_LO_COMP_SHIFT 0 /* DSP2DRC_LO_COMP - [2:0] */
-#define WM8915_DSP2DRC_LO_COMP_WIDTH 3 /* DSP2DRC_LO_COMP - [2:0] */
-
-/*
- * R1347 (0x543) - DSP2 DRC (4)
- */
-#define WM8915_DSP2DRC_KNEE_IP_MASK 0x07E0 /* DSP2DRC_KNEE_IP - [10:5] */
-#define WM8915_DSP2DRC_KNEE_IP_SHIFT 5 /* DSP2DRC_KNEE_IP - [10:5] */
-#define WM8915_DSP2DRC_KNEE_IP_WIDTH 6 /* DSP2DRC_KNEE_IP - [10:5] */
-#define WM8915_DSP2DRC_KNEE_OP_MASK 0x001F /* DSP2DRC_KNEE_OP - [4:0] */
-#define WM8915_DSP2DRC_KNEE_OP_SHIFT 0 /* DSP2DRC_KNEE_OP - [4:0] */
-#define WM8915_DSP2DRC_KNEE_OP_WIDTH 5 /* DSP2DRC_KNEE_OP - [4:0] */
-
-/*
- * R1348 (0x544) - DSP2 DRC (5)
- */
-#define WM8915_DSP2DRC_KNEE2_IP_MASK 0x03E0 /* DSP2DRC_KNEE2_IP - [9:5] */
-#define WM8915_DSP2DRC_KNEE2_IP_SHIFT 5 /* DSP2DRC_KNEE2_IP - [9:5] */
-#define WM8915_DSP2DRC_KNEE2_IP_WIDTH 5 /* DSP2DRC_KNEE2_IP - [9:5] */
-#define WM8915_DSP2DRC_KNEE2_OP_MASK 0x001F /* DSP2DRC_KNEE2_OP - [4:0] */
-#define WM8915_DSP2DRC_KNEE2_OP_SHIFT 0 /* DSP2DRC_KNEE2_OP - [4:0] */
-#define WM8915_DSP2DRC_KNEE2_OP_WIDTH 5 /* DSP2DRC_KNEE2_OP - [4:0] */
-
-/*
- * R1408 (0x580) - DSP2 RX EQ Gains (1)
- */
-#define WM8915_DSP2RX_EQ_B1_GAIN_MASK 0xF800 /* DSP2RX_EQ_B1_GAIN - [15:11] */
-#define WM8915_DSP2RX_EQ_B1_GAIN_SHIFT 11 /* DSP2RX_EQ_B1_GAIN - [15:11] */
-#define WM8915_DSP2RX_EQ_B1_GAIN_WIDTH 5 /* DSP2RX_EQ_B1_GAIN - [15:11] */
-#define WM8915_DSP2RX_EQ_B2_GAIN_MASK 0x07C0 /* DSP2RX_EQ_B2_GAIN - [10:6] */
-#define WM8915_DSP2RX_EQ_B2_GAIN_SHIFT 6 /* DSP2RX_EQ_B2_GAIN - [10:6] */
-#define WM8915_DSP2RX_EQ_B2_GAIN_WIDTH 5 /* DSP2RX_EQ_B2_GAIN - [10:6] */
-#define WM8915_DSP2RX_EQ_B3_GAIN_MASK 0x003E /* DSP2RX_EQ_B3_GAIN - [5:1] */
-#define WM8915_DSP2RX_EQ_B3_GAIN_SHIFT 1 /* DSP2RX_EQ_B3_GAIN - [5:1] */
-#define WM8915_DSP2RX_EQ_B3_GAIN_WIDTH 5 /* DSP2RX_EQ_B3_GAIN - [5:1] */
-#define WM8915_DSP2RX_EQ_ENA 0x0001 /* DSP2RX_EQ_ENA */
-#define WM8915_DSP2RX_EQ_ENA_MASK 0x0001 /* DSP2RX_EQ_ENA */
-#define WM8915_DSP2RX_EQ_ENA_SHIFT 0 /* DSP2RX_EQ_ENA */
-#define WM8915_DSP2RX_EQ_ENA_WIDTH 1 /* DSP2RX_EQ_ENA */
-
-/*
- * R1409 (0x581) - DSP2 RX EQ Gains (2)
- */
-#define WM8915_DSP2RX_EQ_B4_GAIN_MASK 0xF800 /* DSP2RX_EQ_B4_GAIN - [15:11] */
-#define WM8915_DSP2RX_EQ_B4_GAIN_SHIFT 11 /* DSP2RX_EQ_B4_GAIN - [15:11] */
-#define WM8915_DSP2RX_EQ_B4_GAIN_WIDTH 5 /* DSP2RX_EQ_B4_GAIN - [15:11] */
-#define WM8915_DSP2RX_EQ_B5_GAIN_MASK 0x07C0 /* DSP2RX_EQ_B5_GAIN - [10:6] */
-#define WM8915_DSP2RX_EQ_B5_GAIN_SHIFT 6 /* DSP2RX_EQ_B5_GAIN - [10:6] */
-#define WM8915_DSP2RX_EQ_B5_GAIN_WIDTH 5 /* DSP2RX_EQ_B5_GAIN - [10:6] */
-
-/*
- * R1410 (0x582) - DSP2 RX EQ Band 1 A
- */
-#define WM8915_DSP2RX_EQ_B1_A_MASK 0xFFFF /* DSP2RX_EQ_B1_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B1_A_SHIFT 0 /* DSP2RX_EQ_B1_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B1_A_WIDTH 16 /* DSP2RX_EQ_B1_A - [15:0] */
-
-/*
- * R1411 (0x583) - DSP2 RX EQ Band 1 B
- */
-#define WM8915_DSP2RX_EQ_B1_B_MASK 0xFFFF /* DSP2RX_EQ_B1_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B1_B_SHIFT 0 /* DSP2RX_EQ_B1_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B1_B_WIDTH 16 /* DSP2RX_EQ_B1_B - [15:0] */
-
-/*
- * R1412 (0x584) - DSP2 RX EQ Band 1 PG
- */
-#define WM8915_DSP2RX_EQ_B1_PG_MASK 0xFFFF /* DSP2RX_EQ_B1_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B1_PG_SHIFT 0 /* DSP2RX_EQ_B1_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B1_PG_WIDTH 16 /* DSP2RX_EQ_B1_PG - [15:0] */
-
-/*
- * R1413 (0x585) - DSP2 RX EQ Band 2 A
- */
-#define WM8915_DSP2RX_EQ_B2_A_MASK 0xFFFF /* DSP2RX_EQ_B2_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_A_SHIFT 0 /* DSP2RX_EQ_B2_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_A_WIDTH 16 /* DSP2RX_EQ_B2_A - [15:0] */
-
-/*
- * R1414 (0x586) - DSP2 RX EQ Band 2 B
- */
-#define WM8915_DSP2RX_EQ_B2_B_MASK 0xFFFF /* DSP2RX_EQ_B2_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_B_SHIFT 0 /* DSP2RX_EQ_B2_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_B_WIDTH 16 /* DSP2RX_EQ_B2_B - [15:0] */
-
-/*
- * R1415 (0x587) - DSP2 RX EQ Band 2 C
- */
-#define WM8915_DSP2RX_EQ_B2_C_MASK 0xFFFF /* DSP2RX_EQ_B2_C - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_C_SHIFT 0 /* DSP2RX_EQ_B2_C - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_C_WIDTH 16 /* DSP2RX_EQ_B2_C - [15:0] */
-
-/*
- * R1416 (0x588) - DSP2 RX EQ Band 2 PG
- */
-#define WM8915_DSP2RX_EQ_B2_PG_MASK 0xFFFF /* DSP2RX_EQ_B2_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_PG_SHIFT 0 /* DSP2RX_EQ_B2_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B2_PG_WIDTH 16 /* DSP2RX_EQ_B2_PG - [15:0] */
-
-/*
- * R1417 (0x589) - DSP2 RX EQ Band 3 A
- */
-#define WM8915_DSP2RX_EQ_B3_A_MASK 0xFFFF /* DSP2RX_EQ_B3_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_A_SHIFT 0 /* DSP2RX_EQ_B3_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_A_WIDTH 16 /* DSP2RX_EQ_B3_A - [15:0] */
-
-/*
- * R1418 (0x58A) - DSP2 RX EQ Band 3 B
- */
-#define WM8915_DSP2RX_EQ_B3_B_MASK 0xFFFF /* DSP2RX_EQ_B3_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_B_SHIFT 0 /* DSP2RX_EQ_B3_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_B_WIDTH 16 /* DSP2RX_EQ_B3_B - [15:0] */
-
-/*
- * R1419 (0x58B) - DSP2 RX EQ Band 3 C
- */
-#define WM8915_DSP2RX_EQ_B3_C_MASK 0xFFFF /* DSP2RX_EQ_B3_C - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_C_SHIFT 0 /* DSP2RX_EQ_B3_C - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_C_WIDTH 16 /* DSP2RX_EQ_B3_C - [15:0] */
-
-/*
- * R1420 (0x58C) - DSP2 RX EQ Band 3 PG
- */
-#define WM8915_DSP2RX_EQ_B3_PG_MASK 0xFFFF /* DSP2RX_EQ_B3_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_PG_SHIFT 0 /* DSP2RX_EQ_B3_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B3_PG_WIDTH 16 /* DSP2RX_EQ_B3_PG - [15:0] */
-
-/*
- * R1421 (0x58D) - DSP2 RX EQ Band 4 A
- */
-#define WM8915_DSP2RX_EQ_B4_A_MASK 0xFFFF /* DSP2RX_EQ_B4_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_A_SHIFT 0 /* DSP2RX_EQ_B4_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_A_WIDTH 16 /* DSP2RX_EQ_B4_A - [15:0] */
-
-/*
- * R1422 (0x58E) - DSP2 RX EQ Band 4 B
- */
-#define WM8915_DSP2RX_EQ_B4_B_MASK 0xFFFF /* DSP2RX_EQ_B4_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_B_SHIFT 0 /* DSP2RX_EQ_B4_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_B_WIDTH 16 /* DSP2RX_EQ_B4_B - [15:0] */
-
-/*
- * R1423 (0x58F) - DSP2 RX EQ Band 4 C
- */
-#define WM8915_DSP2RX_EQ_B4_C_MASK 0xFFFF /* DSP2RX_EQ_B4_C - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_C_SHIFT 0 /* DSP2RX_EQ_B4_C - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_C_WIDTH 16 /* DSP2RX_EQ_B4_C - [15:0] */
-
-/*
- * R1424 (0x590) - DSP2 RX EQ Band 4 PG
- */
-#define WM8915_DSP2RX_EQ_B4_PG_MASK 0xFFFF /* DSP2RX_EQ_B4_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_PG_SHIFT 0 /* DSP2RX_EQ_B4_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B4_PG_WIDTH 16 /* DSP2RX_EQ_B4_PG - [15:0] */
-
-/*
- * R1425 (0x591) - DSP2 RX EQ Band 5 A
- */
-#define WM8915_DSP2RX_EQ_B5_A_MASK 0xFFFF /* DSP2RX_EQ_B5_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B5_A_SHIFT 0 /* DSP2RX_EQ_B5_A - [15:0] */
-#define WM8915_DSP2RX_EQ_B5_A_WIDTH 16 /* DSP2RX_EQ_B5_A - [15:0] */
-
-/*
- * R1426 (0x592) - DSP2 RX EQ Band 5 B
- */
-#define WM8915_DSP2RX_EQ_B5_B_MASK 0xFFFF /* DSP2RX_EQ_B5_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B5_B_SHIFT 0 /* DSP2RX_EQ_B5_B - [15:0] */
-#define WM8915_DSP2RX_EQ_B5_B_WIDTH 16 /* DSP2RX_EQ_B5_B - [15:0] */
-
-/*
- * R1427 (0x593) - DSP2 RX EQ Band 5 PG
- */
-#define WM8915_DSP2RX_EQ_B5_PG_MASK 0xFFFF /* DSP2RX_EQ_B5_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B5_PG_SHIFT 0 /* DSP2RX_EQ_B5_PG - [15:0] */
-#define WM8915_DSP2RX_EQ_B5_PG_WIDTH 16 /* DSP2RX_EQ_B5_PG - [15:0] */
-
-/*
- * R1536 (0x600) - DAC1 Mixer Volumes
- */
-#define WM8915_ADCR_DAC1_VOL_MASK 0x03E0 /* ADCR_DAC1_VOL - [9:5] */
-#define WM8915_ADCR_DAC1_VOL_SHIFT 5 /* ADCR_DAC1_VOL - [9:5] */
-#define WM8915_ADCR_DAC1_VOL_WIDTH 5 /* ADCR_DAC1_VOL - [9:5] */
-#define WM8915_ADCL_DAC1_VOL_MASK 0x001F /* ADCL_DAC1_VOL - [4:0] */
-#define WM8915_ADCL_DAC1_VOL_SHIFT 0 /* ADCL_DAC1_VOL - [4:0] */
-#define WM8915_ADCL_DAC1_VOL_WIDTH 5 /* ADCL_DAC1_VOL - [4:0] */
-
-/*
- * R1537 (0x601) - DAC1 Left Mixer Routing
- */
-#define WM8915_ADCR_TO_DAC1L 0x0020 /* ADCR_TO_DAC1L */
-#define WM8915_ADCR_TO_DAC1L_MASK 0x0020 /* ADCR_TO_DAC1L */
-#define WM8915_ADCR_TO_DAC1L_SHIFT 5 /* ADCR_TO_DAC1L */
-#define WM8915_ADCR_TO_DAC1L_WIDTH 1 /* ADCR_TO_DAC1L */
-#define WM8915_ADCL_TO_DAC1L 0x0010 /* ADCL_TO_DAC1L */
-#define WM8915_ADCL_TO_DAC1L_MASK 0x0010 /* ADCL_TO_DAC1L */
-#define WM8915_ADCL_TO_DAC1L_SHIFT 4 /* ADCL_TO_DAC1L */
-#define WM8915_ADCL_TO_DAC1L_WIDTH 1 /* ADCL_TO_DAC1L */
-#define WM8915_DSP2RXL_TO_DAC1L 0x0002 /* DSP2RXL_TO_DAC1L */
-#define WM8915_DSP2RXL_TO_DAC1L_MASK 0x0002 /* DSP2RXL_TO_DAC1L */
-#define WM8915_DSP2RXL_TO_DAC1L_SHIFT 1 /* DSP2RXL_TO_DAC1L */
-#define WM8915_DSP2RXL_TO_DAC1L_WIDTH 1 /* DSP2RXL_TO_DAC1L */
-#define WM8915_DSP1RXL_TO_DAC1L 0x0001 /* DSP1RXL_TO_DAC1L */
-#define WM8915_DSP1RXL_TO_DAC1L_MASK 0x0001 /* DSP1RXL_TO_DAC1L */
-#define WM8915_DSP1RXL_TO_DAC1L_SHIFT 0 /* DSP1RXL_TO_DAC1L */
-#define WM8915_DSP1RXL_TO_DAC1L_WIDTH 1 /* DSP1RXL_TO_DAC1L */
-
-/*
- * R1538 (0x602) - DAC1 Right Mixer Routing
- */
-#define WM8915_ADCR_TO_DAC1R 0x0020 /* ADCR_TO_DAC1R */
-#define WM8915_ADCR_TO_DAC1R_MASK 0x0020 /* ADCR_TO_DAC1R */
-#define WM8915_ADCR_TO_DAC1R_SHIFT 5 /* ADCR_TO_DAC1R */
-#define WM8915_ADCR_TO_DAC1R_WIDTH 1 /* ADCR_TO_DAC1R */
-#define WM8915_ADCL_TO_DAC1R 0x0010 /* ADCL_TO_DAC1R */
-#define WM8915_ADCL_TO_DAC1R_MASK 0x0010 /* ADCL_TO_DAC1R */
-#define WM8915_ADCL_TO_DAC1R_SHIFT 4 /* ADCL_TO_DAC1R */
-#define WM8915_ADCL_TO_DAC1R_WIDTH 1 /* ADCL_TO_DAC1R */
-#define WM8915_DSP2RXR_TO_DAC1R 0x0002 /* DSP2RXR_TO_DAC1R */
-#define WM8915_DSP2RXR_TO_DAC1R_MASK 0x0002 /* DSP2RXR_TO_DAC1R */
-#define WM8915_DSP2RXR_TO_DAC1R_SHIFT 1 /* DSP2RXR_TO_DAC1R */
-#define WM8915_DSP2RXR_TO_DAC1R_WIDTH 1 /* DSP2RXR_TO_DAC1R */
-#define WM8915_DSP1RXR_TO_DAC1R 0x0001 /* DSP1RXR_TO_DAC1R */
-#define WM8915_DSP1RXR_TO_DAC1R_MASK 0x0001 /* DSP1RXR_TO_DAC1R */
-#define WM8915_DSP1RXR_TO_DAC1R_SHIFT 0 /* DSP1RXR_TO_DAC1R */
-#define WM8915_DSP1RXR_TO_DAC1R_WIDTH 1 /* DSP1RXR_TO_DAC1R */
-
-/*
- * R1539 (0x603) - DAC2 Mixer Volumes
- */
-#define WM8915_ADCR_DAC2_VOL_MASK 0x03E0 /* ADCR_DAC2_VOL - [9:5] */
-#define WM8915_ADCR_DAC2_VOL_SHIFT 5 /* ADCR_DAC2_VOL - [9:5] */
-#define WM8915_ADCR_DAC2_VOL_WIDTH 5 /* ADCR_DAC2_VOL - [9:5] */
-#define WM8915_ADCL_DAC2_VOL_MASK 0x001F /* ADCL_DAC2_VOL - [4:0] */
-#define WM8915_ADCL_DAC2_VOL_SHIFT 0 /* ADCL_DAC2_VOL - [4:0] */
-#define WM8915_ADCL_DAC2_VOL_WIDTH 5 /* ADCL_DAC2_VOL - [4:0] */
-
-/*
- * R1540 (0x604) - DAC2 Left Mixer Routing
- */
-#define WM8915_ADCR_TO_DAC2L 0x0020 /* ADCR_TO_DAC2L */
-#define WM8915_ADCR_TO_DAC2L_MASK 0x0020 /* ADCR_TO_DAC2L */
-#define WM8915_ADCR_TO_DAC2L_SHIFT 5 /* ADCR_TO_DAC2L */
-#define WM8915_ADCR_TO_DAC2L_WIDTH 1 /* ADCR_TO_DAC2L */
-#define WM8915_ADCL_TO_DAC2L 0x0010 /* ADCL_TO_DAC2L */
-#define WM8915_ADCL_TO_DAC2L_MASK 0x0010 /* ADCL_TO_DAC2L */
-#define WM8915_ADCL_TO_DAC2L_SHIFT 4 /* ADCL_TO_DAC2L */
-#define WM8915_ADCL_TO_DAC2L_WIDTH 1 /* ADCL_TO_DAC2L */
-#define WM8915_DSP2RXL_TO_DAC2L 0x0002 /* DSP2RXL_TO_DAC2L */
-#define WM8915_DSP2RXL_TO_DAC2L_MASK 0x0002 /* DSP2RXL_TO_DAC2L */
-#define WM8915_DSP2RXL_TO_DAC2L_SHIFT 1 /* DSP2RXL_TO_DAC2L */
-#define WM8915_DSP2RXL_TO_DAC2L_WIDTH 1 /* DSP2RXL_TO_DAC2L */
-#define WM8915_DSP1RXL_TO_DAC2L 0x0001 /* DSP1RXL_TO_DAC2L */
-#define WM8915_DSP1RXL_TO_DAC2L_MASK 0x0001 /* DSP1RXL_TO_DAC2L */
-#define WM8915_DSP1RXL_TO_DAC2L_SHIFT 0 /* DSP1RXL_TO_DAC2L */
-#define WM8915_DSP1RXL_TO_DAC2L_WIDTH 1 /* DSP1RXL_TO_DAC2L */
-
-/*
- * R1541 (0x605) - DAC2 Right Mixer Routing
- */
-#define WM8915_ADCR_TO_DAC2R 0x0020 /* ADCR_TO_DAC2R */
-#define WM8915_ADCR_TO_DAC2R_MASK 0x0020 /* ADCR_TO_DAC2R */
-#define WM8915_ADCR_TO_DAC2R_SHIFT 5 /* ADCR_TO_DAC2R */
-#define WM8915_ADCR_TO_DAC2R_WIDTH 1 /* ADCR_TO_DAC2R */
-#define WM8915_ADCL_TO_DAC2R 0x0010 /* ADCL_TO_DAC2R */
-#define WM8915_ADCL_TO_DAC2R_MASK 0x0010 /* ADCL_TO_DAC2R */
-#define WM8915_ADCL_TO_DAC2R_SHIFT 4 /* ADCL_TO_DAC2R */
-#define WM8915_ADCL_TO_DAC2R_WIDTH 1 /* ADCL_TO_DAC2R */
-#define WM8915_DSP2RXR_TO_DAC2R 0x0002 /* DSP2RXR_TO_DAC2R */
-#define WM8915_DSP2RXR_TO_DAC2R_MASK 0x0002 /* DSP2RXR_TO_DAC2R */
-#define WM8915_DSP2RXR_TO_DAC2R_SHIFT 1 /* DSP2RXR_TO_DAC2R */
-#define WM8915_DSP2RXR_TO_DAC2R_WIDTH 1 /* DSP2RXR_TO_DAC2R */
-#define WM8915_DSP1RXR_TO_DAC2R 0x0001 /* DSP1RXR_TO_DAC2R */
-#define WM8915_DSP1RXR_TO_DAC2R_MASK 0x0001 /* DSP1RXR_TO_DAC2R */
-#define WM8915_DSP1RXR_TO_DAC2R_SHIFT 0 /* DSP1RXR_TO_DAC2R */
-#define WM8915_DSP1RXR_TO_DAC2R_WIDTH 1 /* DSP1RXR_TO_DAC2R */
-
-/*
- * R1542 (0x606) - DSP1 TX Left Mixer Routing
- */
-#define WM8915_ADC1L_TO_DSP1TXL 0x0002 /* ADC1L_TO_DSP1TXL */
-#define WM8915_ADC1L_TO_DSP1TXL_MASK 0x0002 /* ADC1L_TO_DSP1TXL */
-#define WM8915_ADC1L_TO_DSP1TXL_SHIFT 1 /* ADC1L_TO_DSP1TXL */
-#define WM8915_ADC1L_TO_DSP1TXL_WIDTH 1 /* ADC1L_TO_DSP1TXL */
-#define WM8915_DACL_TO_DSP1TXL 0x0001 /* DACL_TO_DSP1TXL */
-#define WM8915_DACL_TO_DSP1TXL_MASK 0x0001 /* DACL_TO_DSP1TXL */
-#define WM8915_DACL_TO_DSP1TXL_SHIFT 0 /* DACL_TO_DSP1TXL */
-#define WM8915_DACL_TO_DSP1TXL_WIDTH 1 /* DACL_TO_DSP1TXL */
-
-/*
- * R1543 (0x607) - DSP1 TX Right Mixer Routing
- */
-#define WM8915_ADC1R_TO_DSP1TXR 0x0002 /* ADC1R_TO_DSP1TXR */
-#define WM8915_ADC1R_TO_DSP1TXR_MASK 0x0002 /* ADC1R_TO_DSP1TXR */
-#define WM8915_ADC1R_TO_DSP1TXR_SHIFT 1 /* ADC1R_TO_DSP1TXR */
-#define WM8915_ADC1R_TO_DSP1TXR_WIDTH 1 /* ADC1R_TO_DSP1TXR */
-#define WM8915_DACR_TO_DSP1TXR 0x0001 /* DACR_TO_DSP1TXR */
-#define WM8915_DACR_TO_DSP1TXR_MASK 0x0001 /* DACR_TO_DSP1TXR */
-#define WM8915_DACR_TO_DSP1TXR_SHIFT 0 /* DACR_TO_DSP1TXR */
-#define WM8915_DACR_TO_DSP1TXR_WIDTH 1 /* DACR_TO_DSP1TXR */
-
-/*
- * R1544 (0x608) - DSP2 TX Left Mixer Routing
- */
-#define WM8915_ADC2L_TO_DSP2TXL 0x0002 /* ADC2L_TO_DSP2TXL */
-#define WM8915_ADC2L_TO_DSP2TXL_MASK 0x0002 /* ADC2L_TO_DSP2TXL */
-#define WM8915_ADC2L_TO_DSP2TXL_SHIFT 1 /* ADC2L_TO_DSP2TXL */
-#define WM8915_ADC2L_TO_DSP2TXL_WIDTH 1 /* ADC2L_TO_DSP2TXL */
-#define WM8915_DACL_TO_DSP2TXL 0x0001 /* DACL_TO_DSP2TXL */
-#define WM8915_DACL_TO_DSP2TXL_MASK 0x0001 /* DACL_TO_DSP2TXL */
-#define WM8915_DACL_TO_DSP2TXL_SHIFT 0 /* DACL_TO_DSP2TXL */
-#define WM8915_DACL_TO_DSP2TXL_WIDTH 1 /* DACL_TO_DSP2TXL */
-
-/*
- * R1545 (0x609) - DSP2 TX Right Mixer Routing
- */
-#define WM8915_ADC2R_TO_DSP2TXR 0x0002 /* ADC2R_TO_DSP2TXR */
-#define WM8915_ADC2R_TO_DSP2TXR_MASK 0x0002 /* ADC2R_TO_DSP2TXR */
-#define WM8915_ADC2R_TO_DSP2TXR_SHIFT 1 /* ADC2R_TO_DSP2TXR */
-#define WM8915_ADC2R_TO_DSP2TXR_WIDTH 1 /* ADC2R_TO_DSP2TXR */
-#define WM8915_DACR_TO_DSP2TXR 0x0001 /* DACR_TO_DSP2TXR */
-#define WM8915_DACR_TO_DSP2TXR_MASK 0x0001 /* DACR_TO_DSP2TXR */
-#define WM8915_DACR_TO_DSP2TXR_SHIFT 0 /* DACR_TO_DSP2TXR */
-#define WM8915_DACR_TO_DSP2TXR_WIDTH 1 /* DACR_TO_DSP2TXR */
-
-/*
- * R1546 (0x60A) - DSP TX Mixer Select
- */
-#define WM8915_DAC_TO_DSPTX_SRC 0x0001 /* DAC_TO_DSPTX_SRC */
-#define WM8915_DAC_TO_DSPTX_SRC_MASK 0x0001 /* DAC_TO_DSPTX_SRC */
-#define WM8915_DAC_TO_DSPTX_SRC_SHIFT 0 /* DAC_TO_DSPTX_SRC */
-#define WM8915_DAC_TO_DSPTX_SRC_WIDTH 1 /* DAC_TO_DSPTX_SRC */
-
-/*
- * R1552 (0x610) - DAC Softmute
- */
-#define WM8915_DAC_SOFTMUTEMODE 0x0002 /* DAC_SOFTMUTEMODE */
-#define WM8915_DAC_SOFTMUTEMODE_MASK 0x0002 /* DAC_SOFTMUTEMODE */
-#define WM8915_DAC_SOFTMUTEMODE_SHIFT 1 /* DAC_SOFTMUTEMODE */
-#define WM8915_DAC_SOFTMUTEMODE_WIDTH 1 /* DAC_SOFTMUTEMODE */
-#define WM8915_DAC_MUTERATE 0x0001 /* DAC_MUTERATE */
-#define WM8915_DAC_MUTERATE_MASK 0x0001 /* DAC_MUTERATE */
-#define WM8915_DAC_MUTERATE_SHIFT 0 /* DAC_MUTERATE */
-#define WM8915_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
-
-/*
- * R1568 (0x620) - Oversampling
- */
-#define WM8915_SPK_OSR128 0x0008 /* SPK_OSR128 */
-#define WM8915_SPK_OSR128_MASK 0x0008 /* SPK_OSR128 */
-#define WM8915_SPK_OSR128_SHIFT 3 /* SPK_OSR128 */
-#define WM8915_SPK_OSR128_WIDTH 1 /* SPK_OSR128 */
-#define WM8915_DMIC_OSR64 0x0004 /* DMIC_OSR64 */
-#define WM8915_DMIC_OSR64_MASK 0x0004 /* DMIC_OSR64 */
-#define WM8915_DMIC_OSR64_SHIFT 2 /* DMIC_OSR64 */
-#define WM8915_DMIC_OSR64_WIDTH 1 /* DMIC_OSR64 */
-#define WM8915_ADC_OSR128 0x0002 /* ADC_OSR128 */
-#define WM8915_ADC_OSR128_MASK 0x0002 /* ADC_OSR128 */
-#define WM8915_ADC_OSR128_SHIFT 1 /* ADC_OSR128 */
-#define WM8915_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */
-#define WM8915_DAC_OSR128 0x0001 /* DAC_OSR128 */
-#define WM8915_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */
-#define WM8915_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */
-#define WM8915_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
-
-/*
- * R1569 (0x621) - Sidetone
- */
-#define WM8915_ST_LPF 0x1000 /* ST_LPF */
-#define WM8915_ST_LPF_MASK 0x1000 /* ST_LPF */
-#define WM8915_ST_LPF_SHIFT 12 /* ST_LPF */
-#define WM8915_ST_LPF_WIDTH 1 /* ST_LPF */
-#define WM8915_ST_HPF_CUT_MASK 0x0380 /* ST_HPF_CUT - [9:7] */
-#define WM8915_ST_HPF_CUT_SHIFT 7 /* ST_HPF_CUT - [9:7] */
-#define WM8915_ST_HPF_CUT_WIDTH 3 /* ST_HPF_CUT - [9:7] */
-#define WM8915_ST_HPF 0x0040 /* ST_HPF */
-#define WM8915_ST_HPF_MASK 0x0040 /* ST_HPF */
-#define WM8915_ST_HPF_SHIFT 6 /* ST_HPF */
-#define WM8915_ST_HPF_WIDTH 1 /* ST_HPF */
-#define WM8915_STR_SEL 0x0002 /* STR_SEL */
-#define WM8915_STR_SEL_MASK 0x0002 /* STR_SEL */
-#define WM8915_STR_SEL_SHIFT 1 /* STR_SEL */
-#define WM8915_STR_SEL_WIDTH 1 /* STR_SEL */
-#define WM8915_STL_SEL 0x0001 /* STL_SEL */
-#define WM8915_STL_SEL_MASK 0x0001 /* STL_SEL */
-#define WM8915_STL_SEL_SHIFT 0 /* STL_SEL */
-#define WM8915_STL_SEL_WIDTH 1 /* STL_SEL */
-
-/*
- * R1792 (0x700) - GPIO 1
- */
-#define WM8915_GP1_DIR 0x8000 /* GP1_DIR */
-#define WM8915_GP1_DIR_MASK 0x8000 /* GP1_DIR */
-#define WM8915_GP1_DIR_SHIFT 15 /* GP1_DIR */
-#define WM8915_GP1_DIR_WIDTH 1 /* GP1_DIR */
-#define WM8915_GP1_PU 0x4000 /* GP1_PU */
-#define WM8915_GP1_PU_MASK 0x4000 /* GP1_PU */
-#define WM8915_GP1_PU_SHIFT 14 /* GP1_PU */
-#define WM8915_GP1_PU_WIDTH 1 /* GP1_PU */
-#define WM8915_GP1_PD 0x2000 /* GP1_PD */
-#define WM8915_GP1_PD_MASK 0x2000 /* GP1_PD */
-#define WM8915_GP1_PD_SHIFT 13 /* GP1_PD */
-#define WM8915_GP1_PD_WIDTH 1 /* GP1_PD */
-#define WM8915_GP1_POL 0x0400 /* GP1_POL */
-#define WM8915_GP1_POL_MASK 0x0400 /* GP1_POL */
-#define WM8915_GP1_POL_SHIFT 10 /* GP1_POL */
-#define WM8915_GP1_POL_WIDTH 1 /* GP1_POL */
-#define WM8915_GP1_OP_CFG 0x0200 /* GP1_OP_CFG */
-#define WM8915_GP1_OP_CFG_MASK 0x0200 /* GP1_OP_CFG */
-#define WM8915_GP1_OP_CFG_SHIFT 9 /* GP1_OP_CFG */
-#define WM8915_GP1_OP_CFG_WIDTH 1 /* GP1_OP_CFG */
-#define WM8915_GP1_DB 0x0100 /* GP1_DB */
-#define WM8915_GP1_DB_MASK 0x0100 /* GP1_DB */
-#define WM8915_GP1_DB_SHIFT 8 /* GP1_DB */
-#define WM8915_GP1_DB_WIDTH 1 /* GP1_DB */
-#define WM8915_GP1_LVL 0x0040 /* GP1_LVL */
-#define WM8915_GP1_LVL_MASK 0x0040 /* GP1_LVL */
-#define WM8915_GP1_LVL_SHIFT 6 /* GP1_LVL */
-#define WM8915_GP1_LVL_WIDTH 1 /* GP1_LVL */
-#define WM8915_GP1_FN_MASK 0x000F /* GP1_FN - [3:0] */
-#define WM8915_GP1_FN_SHIFT 0 /* GP1_FN - [3:0] */
-#define WM8915_GP1_FN_WIDTH 4 /* GP1_FN - [3:0] */
-
-/*
- * R1793 (0x701) - GPIO 2
- */
-#define WM8915_GP2_DIR 0x8000 /* GP2_DIR */
-#define WM8915_GP2_DIR_MASK 0x8000 /* GP2_DIR */
-#define WM8915_GP2_DIR_SHIFT 15 /* GP2_DIR */
-#define WM8915_GP2_DIR_WIDTH 1 /* GP2_DIR */
-#define WM8915_GP2_PU 0x4000 /* GP2_PU */
-#define WM8915_GP2_PU_MASK 0x4000 /* GP2_PU */
-#define WM8915_GP2_PU_SHIFT 14 /* GP2_PU */
-#define WM8915_GP2_PU_WIDTH 1 /* GP2_PU */
-#define WM8915_GP2_PD 0x2000 /* GP2_PD */
-#define WM8915_GP2_PD_MASK 0x2000 /* GP2_PD */
-#define WM8915_GP2_PD_SHIFT 13 /* GP2_PD */
-#define WM8915_GP2_PD_WIDTH 1 /* GP2_PD */
-#define WM8915_GP2_POL 0x0400 /* GP2_POL */
-#define WM8915_GP2_POL_MASK 0x0400 /* GP2_POL */
-#define WM8915_GP2_POL_SHIFT 10 /* GP2_POL */
-#define WM8915_GP2_POL_WIDTH 1 /* GP2_POL */
-#define WM8915_GP2_OP_CFG 0x0200 /* GP2_OP_CFG */
-#define WM8915_GP2_OP_CFG_MASK 0x0200 /* GP2_OP_CFG */
-#define WM8915_GP2_OP_CFG_SHIFT 9 /* GP2_OP_CFG */
-#define WM8915_GP2_OP_CFG_WIDTH 1 /* GP2_OP_CFG */
-#define WM8915_GP2_DB 0x0100 /* GP2_DB */
-#define WM8915_GP2_DB_MASK 0x0100 /* GP2_DB */
-#define WM8915_GP2_DB_SHIFT 8 /* GP2_DB */
-#define WM8915_GP2_DB_WIDTH 1 /* GP2_DB */
-#define WM8915_GP2_LVL 0x0040 /* GP2_LVL */
-#define WM8915_GP2_LVL_MASK 0x0040 /* GP2_LVL */
-#define WM8915_GP2_LVL_SHIFT 6 /* GP2_LVL */
-#define WM8915_GP2_LVL_WIDTH 1 /* GP2_LVL */
-#define WM8915_GP2_FN_MASK 0x000F /* GP2_FN - [3:0] */
-#define WM8915_GP2_FN_SHIFT 0 /* GP2_FN - [3:0] */
-#define WM8915_GP2_FN_WIDTH 4 /* GP2_FN - [3:0] */
-
-/*
- * R1794 (0x702) - GPIO 3
- */
-#define WM8915_GP3_DIR 0x8000 /* GP3_DIR */
-#define WM8915_GP3_DIR_MASK 0x8000 /* GP3_DIR */
-#define WM8915_GP3_DIR_SHIFT 15 /* GP3_DIR */
-#define WM8915_GP3_DIR_WIDTH 1 /* GP3_DIR */
-#define WM8915_GP3_PU 0x4000 /* GP3_PU */
-#define WM8915_GP3_PU_MASK 0x4000 /* GP3_PU */
-#define WM8915_GP3_PU_SHIFT 14 /* GP3_PU */
-#define WM8915_GP3_PU_WIDTH 1 /* GP3_PU */
-#define WM8915_GP3_PD 0x2000 /* GP3_PD */
-#define WM8915_GP3_PD_MASK 0x2000 /* GP3_PD */
-#define WM8915_GP3_PD_SHIFT 13 /* GP3_PD */
-#define WM8915_GP3_PD_WIDTH 1 /* GP3_PD */
-#define WM8915_GP3_POL 0x0400 /* GP3_POL */
-#define WM8915_GP3_POL_MASK 0x0400 /* GP3_POL */
-#define WM8915_GP3_POL_SHIFT 10 /* GP3_POL */
-#define WM8915_GP3_POL_WIDTH 1 /* GP3_POL */
-#define WM8915_GP3_OP_CFG 0x0200 /* GP3_OP_CFG */
-#define WM8915_GP3_OP_CFG_MASK 0x0200 /* GP3_OP_CFG */
-#define WM8915_GP3_OP_CFG_SHIFT 9 /* GP3_OP_CFG */
-#define WM8915_GP3_OP_CFG_WIDTH 1 /* GP3_OP_CFG */
-#define WM8915_GP3_DB 0x0100 /* GP3_DB */
-#define WM8915_GP3_DB_MASK 0x0100 /* GP3_DB */
-#define WM8915_GP3_DB_SHIFT 8 /* GP3_DB */
-#define WM8915_GP3_DB_WIDTH 1 /* GP3_DB */
-#define WM8915_GP3_LVL 0x0040 /* GP3_LVL */
-#define WM8915_GP3_LVL_MASK 0x0040 /* GP3_LVL */
-#define WM8915_GP3_LVL_SHIFT 6 /* GP3_LVL */
-#define WM8915_GP3_LVL_WIDTH 1 /* GP3_LVL */
-#define WM8915_GP3_FN_MASK 0x000F /* GP3_FN - [3:0] */
-#define WM8915_GP3_FN_SHIFT 0 /* GP3_FN - [3:0] */
-#define WM8915_GP3_FN_WIDTH 4 /* GP3_FN - [3:0] */
-
-/*
- * R1795 (0x703) - GPIO 4
- */
-#define WM8915_GP4_DIR 0x8000 /* GP4_DIR */
-#define WM8915_GP4_DIR_MASK 0x8000 /* GP4_DIR */
-#define WM8915_GP4_DIR_SHIFT 15 /* GP4_DIR */
-#define WM8915_GP4_DIR_WIDTH 1 /* GP4_DIR */
-#define WM8915_GP4_PU 0x4000 /* GP4_PU */
-#define WM8915_GP4_PU_MASK 0x4000 /* GP4_PU */
-#define WM8915_GP4_PU_SHIFT 14 /* GP4_PU */
-#define WM8915_GP4_PU_WIDTH 1 /* GP4_PU */
-#define WM8915_GP4_PD 0x2000 /* GP4_PD */
-#define WM8915_GP4_PD_MASK 0x2000 /* GP4_PD */
-#define WM8915_GP4_PD_SHIFT 13 /* GP4_PD */
-#define WM8915_GP4_PD_WIDTH 1 /* GP4_PD */
-#define WM8915_GP4_POL 0x0400 /* GP4_POL */
-#define WM8915_GP4_POL_MASK 0x0400 /* GP4_POL */
-#define WM8915_GP4_POL_SHIFT 10 /* GP4_POL */
-#define WM8915_GP4_POL_WIDTH 1 /* GP4_POL */
-#define WM8915_GP4_OP_CFG 0x0200 /* GP4_OP_CFG */
-#define WM8915_GP4_OP_CFG_MASK 0x0200 /* GP4_OP_CFG */
-#define WM8915_GP4_OP_CFG_SHIFT 9 /* GP4_OP_CFG */
-#define WM8915_GP4_OP_CFG_WIDTH 1 /* GP4_OP_CFG */
-#define WM8915_GP4_DB 0x0100 /* GP4_DB */
-#define WM8915_GP4_DB_MASK 0x0100 /* GP4_DB */
-#define WM8915_GP4_DB_SHIFT 8 /* GP4_DB */
-#define WM8915_GP4_DB_WIDTH 1 /* GP4_DB */
-#define WM8915_GP4_LVL 0x0040 /* GP4_LVL */
-#define WM8915_GP4_LVL_MASK 0x0040 /* GP4_LVL */
-#define WM8915_GP4_LVL_SHIFT 6 /* GP4_LVL */
-#define WM8915_GP4_LVL_WIDTH 1 /* GP4_LVL */
-#define WM8915_GP4_FN_MASK 0x000F /* GP4_FN - [3:0] */
-#define WM8915_GP4_FN_SHIFT 0 /* GP4_FN - [3:0] */
-#define WM8915_GP4_FN_WIDTH 4 /* GP4_FN - [3:0] */
-
-/*
- * R1796 (0x704) - GPIO 5
- */
-#define WM8915_GP5_DIR 0x8000 /* GP5_DIR */
-#define WM8915_GP5_DIR_MASK 0x8000 /* GP5_DIR */
-#define WM8915_GP5_DIR_SHIFT 15 /* GP5_DIR */
-#define WM8915_GP5_DIR_WIDTH 1 /* GP5_DIR */
-#define WM8915_GP5_PU 0x4000 /* GP5_PU */
-#define WM8915_GP5_PU_MASK 0x4000 /* GP5_PU */
-#define WM8915_GP5_PU_SHIFT 14 /* GP5_PU */
-#define WM8915_GP5_PU_WIDTH 1 /* GP5_PU */
-#define WM8915_GP5_PD 0x2000 /* GP5_PD */
-#define WM8915_GP5_PD_MASK 0x2000 /* GP5_PD */
-#define WM8915_GP5_PD_SHIFT 13 /* GP5_PD */
-#define WM8915_GP5_PD_WIDTH 1 /* GP5_PD */
-#define WM8915_GP5_POL 0x0400 /* GP5_POL */
-#define WM8915_GP5_POL_MASK 0x0400 /* GP5_POL */
-#define WM8915_GP5_POL_SHIFT 10 /* GP5_POL */
-#define WM8915_GP5_POL_WIDTH 1 /* GP5_POL */
-#define WM8915_GP5_OP_CFG 0x0200 /* GP5_OP_CFG */
-#define WM8915_GP5_OP_CFG_MASK 0x0200 /* GP5_OP_CFG */
-#define WM8915_GP5_OP_CFG_SHIFT 9 /* GP5_OP_CFG */
-#define WM8915_GP5_OP_CFG_WIDTH 1 /* GP5_OP_CFG */
-#define WM8915_GP5_DB 0x0100 /* GP5_DB */
-#define WM8915_GP5_DB_MASK 0x0100 /* GP5_DB */
-#define WM8915_GP5_DB_SHIFT 8 /* GP5_DB */
-#define WM8915_GP5_DB_WIDTH 1 /* GP5_DB */
-#define WM8915_GP5_LVL 0x0040 /* GP5_LVL */
-#define WM8915_GP5_LVL_MASK 0x0040 /* GP5_LVL */
-#define WM8915_GP5_LVL_SHIFT 6 /* GP5_LVL */
-#define WM8915_GP5_LVL_WIDTH 1 /* GP5_LVL */
-#define WM8915_GP5_FN_MASK 0x000F /* GP5_FN - [3:0] */
-#define WM8915_GP5_FN_SHIFT 0 /* GP5_FN - [3:0] */
-#define WM8915_GP5_FN_WIDTH 4 /* GP5_FN - [3:0] */
-
-/*
- * R1824 (0x720) - Pull Control (1)
- */
-#define WM8915_DMICDAT2_PD 0x1000 /* DMICDAT2_PD */
-#define WM8915_DMICDAT2_PD_MASK 0x1000 /* DMICDAT2_PD */
-#define WM8915_DMICDAT2_PD_SHIFT 12 /* DMICDAT2_PD */
-#define WM8915_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */
-#define WM8915_DMICDAT1_PD 0x0400 /* DMICDAT1_PD */
-#define WM8915_DMICDAT1_PD_MASK 0x0400 /* DMICDAT1_PD */
-#define WM8915_DMICDAT1_PD_SHIFT 10 /* DMICDAT1_PD */
-#define WM8915_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */
-#define WM8915_MCLK2_PU 0x0200 /* MCLK2_PU */
-#define WM8915_MCLK2_PU_MASK 0x0200 /* MCLK2_PU */
-#define WM8915_MCLK2_PU_SHIFT 9 /* MCLK2_PU */
-#define WM8915_MCLK2_PU_WIDTH 1 /* MCLK2_PU */
-#define WM8915_MCLK2_PD 0x0100 /* MCLK2_PD */
-#define WM8915_MCLK2_PD_MASK 0x0100 /* MCLK2_PD */
-#define WM8915_MCLK2_PD_SHIFT 8 /* MCLK2_PD */
-#define WM8915_MCLK2_PD_WIDTH 1 /* MCLK2_PD */
-#define WM8915_MCLK1_PU 0x0080 /* MCLK1_PU */
-#define WM8915_MCLK1_PU_MASK 0x0080 /* MCLK1_PU */
-#define WM8915_MCLK1_PU_SHIFT 7 /* MCLK1_PU */
-#define WM8915_MCLK1_PU_WIDTH 1 /* MCLK1_PU */
-#define WM8915_MCLK1_PD 0x0040 /* MCLK1_PD */
-#define WM8915_MCLK1_PD_MASK 0x0040 /* MCLK1_PD */
-#define WM8915_MCLK1_PD_SHIFT 6 /* MCLK1_PD */
-#define WM8915_MCLK1_PD_WIDTH 1 /* MCLK1_PD */
-#define WM8915_DACDAT1_PU 0x0020 /* DACDAT1_PU */
-#define WM8915_DACDAT1_PU_MASK 0x0020 /* DACDAT1_PU */
-#define WM8915_DACDAT1_PU_SHIFT 5 /* DACDAT1_PU */
-#define WM8915_DACDAT1_PU_WIDTH 1 /* DACDAT1_PU */
-#define WM8915_DACDAT1_PD 0x0010 /* DACDAT1_PD */
-#define WM8915_DACDAT1_PD_MASK 0x0010 /* DACDAT1_PD */
-#define WM8915_DACDAT1_PD_SHIFT 4 /* DACDAT1_PD */
-#define WM8915_DACDAT1_PD_WIDTH 1 /* DACDAT1_PD */
-#define WM8915_DACLRCLK1_PU 0x0008 /* DACLRCLK1_PU */
-#define WM8915_DACLRCLK1_PU_MASK 0x0008 /* DACLRCLK1_PU */
-#define WM8915_DACLRCLK1_PU_SHIFT 3 /* DACLRCLK1_PU */
-#define WM8915_DACLRCLK1_PU_WIDTH 1 /* DACLRCLK1_PU */
-#define WM8915_DACLRCLK1_PD 0x0004 /* DACLRCLK1_PD */
-#define WM8915_DACLRCLK1_PD_MASK 0x0004 /* DACLRCLK1_PD */
-#define WM8915_DACLRCLK1_PD_SHIFT 2 /* DACLRCLK1_PD */
-#define WM8915_DACLRCLK1_PD_WIDTH 1 /* DACLRCLK1_PD */
-#define WM8915_BCLK1_PU 0x0002 /* BCLK1_PU */
-#define WM8915_BCLK1_PU_MASK 0x0002 /* BCLK1_PU */
-#define WM8915_BCLK1_PU_SHIFT 1 /* BCLK1_PU */
-#define WM8915_BCLK1_PU_WIDTH 1 /* BCLK1_PU */
-#define WM8915_BCLK1_PD 0x0001 /* BCLK1_PD */
-#define WM8915_BCLK1_PD_MASK 0x0001 /* BCLK1_PD */
-#define WM8915_BCLK1_PD_SHIFT 0 /* BCLK1_PD */
-#define WM8915_BCLK1_PD_WIDTH 1 /* BCLK1_PD */
-
-/*
- * R1825 (0x721) - Pull Control (2)
- */
-#define WM8915_LDO1ENA_PD 0x0100 /* LDO1ENA_PD */
-#define WM8915_LDO1ENA_PD_MASK 0x0100 /* LDO1ENA_PD */
-#define WM8915_LDO1ENA_PD_SHIFT 8 /* LDO1ENA_PD */
-#define WM8915_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */
-#define WM8915_ADDR_PD 0x0040 /* ADDR_PD */
-#define WM8915_ADDR_PD_MASK 0x0040 /* ADDR_PD */
-#define WM8915_ADDR_PD_SHIFT 6 /* ADDR_PD */
-#define WM8915_ADDR_PD_WIDTH 1 /* ADDR_PD */
-#define WM8915_DACDAT2_PU 0x0020 /* DACDAT2_PU */
-#define WM8915_DACDAT2_PU_MASK 0x0020 /* DACDAT2_PU */
-#define WM8915_DACDAT2_PU_SHIFT 5 /* DACDAT2_PU */
-#define WM8915_DACDAT2_PU_WIDTH 1 /* DACDAT2_PU */
-#define WM8915_DACDAT2_PD 0x0010 /* DACDAT2_PD */
-#define WM8915_DACDAT2_PD_MASK 0x0010 /* DACDAT2_PD */
-#define WM8915_DACDAT2_PD_SHIFT 4 /* DACDAT2_PD */
-#define WM8915_DACDAT2_PD_WIDTH 1 /* DACDAT2_PD */
-#define WM8915_DACLRCLK2_PU 0x0008 /* DACLRCLK2_PU */
-#define WM8915_DACLRCLK2_PU_MASK 0x0008 /* DACLRCLK2_PU */
-#define WM8915_DACLRCLK2_PU_SHIFT 3 /* DACLRCLK2_PU */
-#define WM8915_DACLRCLK2_PU_WIDTH 1 /* DACLRCLK2_PU */
-#define WM8915_DACLRCLK2_PD 0x0004 /* DACLRCLK2_PD */
-#define WM8915_DACLRCLK2_PD_MASK 0x0004 /* DACLRCLK2_PD */
-#define WM8915_DACLRCLK2_PD_SHIFT 2 /* DACLRCLK2_PD */
-#define WM8915_DACLRCLK2_PD_WIDTH 1 /* DACLRCLK2_PD */
-#define WM8915_BCLK2_PU 0x0002 /* BCLK2_PU */
-#define WM8915_BCLK2_PU_MASK 0x0002 /* BCLK2_PU */
-#define WM8915_BCLK2_PU_SHIFT 1 /* BCLK2_PU */
-#define WM8915_BCLK2_PU_WIDTH 1 /* BCLK2_PU */
-#define WM8915_BCLK2_PD 0x0001 /* BCLK2_PD */
-#define WM8915_BCLK2_PD_MASK 0x0001 /* BCLK2_PD */
-#define WM8915_BCLK2_PD_SHIFT 0 /* BCLK2_PD */
-#define WM8915_BCLK2_PD_WIDTH 1 /* BCLK2_PD */
-
-/*
- * R1840 (0x730) - Interrupt Status 1
- */
-#define WM8915_GP5_EINT 0x0010 /* GP5_EINT */
-#define WM8915_GP5_EINT_MASK 0x0010 /* GP5_EINT */
-#define WM8915_GP5_EINT_SHIFT 4 /* GP5_EINT */
-#define WM8915_GP5_EINT_WIDTH 1 /* GP5_EINT */
-#define WM8915_GP4_EINT 0x0008 /* GP4_EINT */
-#define WM8915_GP4_EINT_MASK 0x0008 /* GP4_EINT */
-#define WM8915_GP4_EINT_SHIFT 3 /* GP4_EINT */
-#define WM8915_GP4_EINT_WIDTH 1 /* GP4_EINT */
-#define WM8915_GP3_EINT 0x0004 /* GP3_EINT */
-#define WM8915_GP3_EINT_MASK 0x0004 /* GP3_EINT */
-#define WM8915_GP3_EINT_SHIFT 2 /* GP3_EINT */
-#define WM8915_GP3_EINT_WIDTH 1 /* GP3_EINT */
-#define WM8915_GP2_EINT 0x0002 /* GP2_EINT */
-#define WM8915_GP2_EINT_MASK 0x0002 /* GP2_EINT */
-#define WM8915_GP2_EINT_SHIFT 1 /* GP2_EINT */
-#define WM8915_GP2_EINT_WIDTH 1 /* GP2_EINT */
-#define WM8915_GP1_EINT 0x0001 /* GP1_EINT */
-#define WM8915_GP1_EINT_MASK 0x0001 /* GP1_EINT */
-#define WM8915_GP1_EINT_SHIFT 0 /* GP1_EINT */
-#define WM8915_GP1_EINT_WIDTH 1 /* GP1_EINT */
-
-/*
- * R1841 (0x731) - Interrupt Status 2
- */
-#define WM8915_DCS_DONE_23_EINT 0x1000 /* DCS_DONE_23_EINT */
-#define WM8915_DCS_DONE_23_EINT_MASK 0x1000 /* DCS_DONE_23_EINT */
-#define WM8915_DCS_DONE_23_EINT_SHIFT 12 /* DCS_DONE_23_EINT */
-#define WM8915_DCS_DONE_23_EINT_WIDTH 1 /* DCS_DONE_23_EINT */
-#define WM8915_DCS_DONE_01_EINT 0x0800 /* DCS_DONE_01_EINT */
-#define WM8915_DCS_DONE_01_EINT_MASK 0x0800 /* DCS_DONE_01_EINT */
-#define WM8915_DCS_DONE_01_EINT_SHIFT 11 /* DCS_DONE_01_EINT */
-#define WM8915_DCS_DONE_01_EINT_WIDTH 1 /* DCS_DONE_01_EINT */
-#define WM8915_WSEQ_DONE_EINT 0x0400 /* WSEQ_DONE_EINT */
-#define WM8915_WSEQ_DONE_EINT_MASK 0x0400 /* WSEQ_DONE_EINT */
-#define WM8915_WSEQ_DONE_EINT_SHIFT 10 /* WSEQ_DONE_EINT */
-#define WM8915_WSEQ_DONE_EINT_WIDTH 1 /* WSEQ_DONE_EINT */
-#define WM8915_FIFOS_ERR_EINT 0x0200 /* FIFOS_ERR_EINT */
-#define WM8915_FIFOS_ERR_EINT_MASK 0x0200 /* FIFOS_ERR_EINT */
-#define WM8915_FIFOS_ERR_EINT_SHIFT 9 /* FIFOS_ERR_EINT */
-#define WM8915_FIFOS_ERR_EINT_WIDTH 1 /* FIFOS_ERR_EINT */
-#define WM8915_DSP2DRC_SIG_DET_EINT 0x0080 /* DSP2DRC_SIG_DET_EINT */
-#define WM8915_DSP2DRC_SIG_DET_EINT_MASK 0x0080 /* DSP2DRC_SIG_DET_EINT */
-#define WM8915_DSP2DRC_SIG_DET_EINT_SHIFT 7 /* DSP2DRC_SIG_DET_EINT */
-#define WM8915_DSP2DRC_SIG_DET_EINT_WIDTH 1 /* DSP2DRC_SIG_DET_EINT */
-#define WM8915_DSP1DRC_SIG_DET_EINT 0x0040 /* DSP1DRC_SIG_DET_EINT */
-#define WM8915_DSP1DRC_SIG_DET_EINT_MASK 0x0040 /* DSP1DRC_SIG_DET_EINT */
-#define WM8915_DSP1DRC_SIG_DET_EINT_SHIFT 6 /* DSP1DRC_SIG_DET_EINT */
-#define WM8915_DSP1DRC_SIG_DET_EINT_WIDTH 1 /* DSP1DRC_SIG_DET_EINT */
-#define WM8915_FLL_SW_CLK_DONE_EINT 0x0008 /* FLL_SW_CLK_DONE_EINT */
-#define WM8915_FLL_SW_CLK_DONE_EINT_MASK 0x0008 /* FLL_SW_CLK_DONE_EINT */
-#define WM8915_FLL_SW_CLK_DONE_EINT_SHIFT 3 /* FLL_SW_CLK_DONE_EINT */
-#define WM8915_FLL_SW_CLK_DONE_EINT_WIDTH 1 /* FLL_SW_CLK_DONE_EINT */
-#define WM8915_FLL_LOCK_EINT 0x0004 /* FLL_LOCK_EINT */
-#define WM8915_FLL_LOCK_EINT_MASK 0x0004 /* FLL_LOCK_EINT */
-#define WM8915_FLL_LOCK_EINT_SHIFT 2 /* FLL_LOCK_EINT */
-#define WM8915_FLL_LOCK_EINT_WIDTH 1 /* FLL_LOCK_EINT */
-#define WM8915_HP_DONE_EINT 0x0002 /* HP_DONE_EINT */
-#define WM8915_HP_DONE_EINT_MASK 0x0002 /* HP_DONE_EINT */
-#define WM8915_HP_DONE_EINT_SHIFT 1 /* HP_DONE_EINT */
-#define WM8915_HP_DONE_EINT_WIDTH 1 /* HP_DONE_EINT */
-#define WM8915_MICD_EINT 0x0001 /* MICD_EINT */
-#define WM8915_MICD_EINT_MASK 0x0001 /* MICD_EINT */
-#define WM8915_MICD_EINT_SHIFT 0 /* MICD_EINT */
-#define WM8915_MICD_EINT_WIDTH 1 /* MICD_EINT */
-
-/*
- * R1842 (0x732) - Interrupt Raw Status 2
- */
-#define WM8915_DCS_DONE_23_STS 0x1000 /* DCS_DONE_23_STS */
-#define WM8915_DCS_DONE_23_STS_MASK 0x1000 /* DCS_DONE_23_STS */
-#define WM8915_DCS_DONE_23_STS_SHIFT 12 /* DCS_DONE_23_STS */
-#define WM8915_DCS_DONE_23_STS_WIDTH 1 /* DCS_DONE_23_STS */
-#define WM8915_DCS_DONE_01_STS 0x0800 /* DCS_DONE_01_STS */
-#define WM8915_DCS_DONE_01_STS_MASK 0x0800 /* DCS_DONE_01_STS */
-#define WM8915_DCS_DONE_01_STS_SHIFT 11 /* DCS_DONE_01_STS */
-#define WM8915_DCS_DONE_01_STS_WIDTH 1 /* DCS_DONE_01_STS */
-#define WM8915_WSEQ_DONE_STS 0x0400 /* WSEQ_DONE_STS */
-#define WM8915_WSEQ_DONE_STS_MASK 0x0400 /* WSEQ_DONE_STS */
-#define WM8915_WSEQ_DONE_STS_SHIFT 10 /* WSEQ_DONE_STS */
-#define WM8915_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */
-#define WM8915_FIFOS_ERR_STS 0x0200 /* FIFOS_ERR_STS */
-#define WM8915_FIFOS_ERR_STS_MASK 0x0200 /* FIFOS_ERR_STS */
-#define WM8915_FIFOS_ERR_STS_SHIFT 9 /* FIFOS_ERR_STS */
-#define WM8915_FIFOS_ERR_STS_WIDTH 1 /* FIFOS_ERR_STS */
-#define WM8915_DSP2DRC_SIG_DET_STS 0x0080 /* DSP2DRC_SIG_DET_STS */
-#define WM8915_DSP2DRC_SIG_DET_STS_MASK 0x0080 /* DSP2DRC_SIG_DET_STS */
-#define WM8915_DSP2DRC_SIG_DET_STS_SHIFT 7 /* DSP2DRC_SIG_DET_STS */
-#define WM8915_DSP2DRC_SIG_DET_STS_WIDTH 1 /* DSP2DRC_SIG_DET_STS */
-#define WM8915_DSP1DRC_SIG_DET_STS 0x0040 /* DSP1DRC_SIG_DET_STS */
-#define WM8915_DSP1DRC_SIG_DET_STS_MASK 0x0040 /* DSP1DRC_SIG_DET_STS */
-#define WM8915_DSP1DRC_SIG_DET_STS_SHIFT 6 /* DSP1DRC_SIG_DET_STS */
-#define WM8915_DSP1DRC_SIG_DET_STS_WIDTH 1 /* DSP1DRC_SIG_DET_STS */
-#define WM8915_FLL_LOCK_STS 0x0004 /* FLL_LOCK_STS */
-#define WM8915_FLL_LOCK_STS_MASK 0x0004 /* FLL_LOCK_STS */
-#define WM8915_FLL_LOCK_STS_SHIFT 2 /* FLL_LOCK_STS */
-#define WM8915_FLL_LOCK_STS_WIDTH 1 /* FLL_LOCK_STS */
-
-/*
- * R1848 (0x738) - Interrupt Status 1 Mask
- */
-#define WM8915_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */
-#define WM8915_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */
-#define WM8915_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */
-#define WM8915_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */
-#define WM8915_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */
-#define WM8915_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */
-#define WM8915_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */
-#define WM8915_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */
-#define WM8915_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */
-#define WM8915_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */
-#define WM8915_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */
-#define WM8915_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */
-#define WM8915_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */
-#define WM8915_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */
-#define WM8915_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */
-#define WM8915_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */
-#define WM8915_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */
-#define WM8915_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */
-#define WM8915_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */
-#define WM8915_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */
-
-/*
- * R1849 (0x739) - Interrupt Status 2 Mask
- */
-#define WM8915_IM_DCS_DONE_23_EINT 0x1000 /* IM_DCS_DONE_23_EINT */
-#define WM8915_IM_DCS_DONE_23_EINT_MASK 0x1000 /* IM_DCS_DONE_23_EINT */
-#define WM8915_IM_DCS_DONE_23_EINT_SHIFT 12 /* IM_DCS_DONE_23_EINT */
-#define WM8915_IM_DCS_DONE_23_EINT_WIDTH 1 /* IM_DCS_DONE_23_EINT */
-#define WM8915_IM_DCS_DONE_01_EINT 0x0800 /* IM_DCS_DONE_01_EINT */
-#define WM8915_IM_DCS_DONE_01_EINT_MASK 0x0800 /* IM_DCS_DONE_01_EINT */
-#define WM8915_IM_DCS_DONE_01_EINT_SHIFT 11 /* IM_DCS_DONE_01_EINT */
-#define WM8915_IM_DCS_DONE_01_EINT_WIDTH 1 /* IM_DCS_DONE_01_EINT */
-#define WM8915_IM_WSEQ_DONE_EINT 0x0400 /* IM_WSEQ_DONE_EINT */
-#define WM8915_IM_WSEQ_DONE_EINT_MASK 0x0400 /* IM_WSEQ_DONE_EINT */
-#define WM8915_IM_WSEQ_DONE_EINT_SHIFT 10 /* IM_WSEQ_DONE_EINT */
-#define WM8915_IM_WSEQ_DONE_EINT_WIDTH 1 /* IM_WSEQ_DONE_EINT */
-#define WM8915_IM_FIFOS_ERR_EINT 0x0200 /* IM_FIFOS_ERR_EINT */
-#define WM8915_IM_FIFOS_ERR_EINT_MASK 0x0200 /* IM_FIFOS_ERR_EINT */
-#define WM8915_IM_FIFOS_ERR_EINT_SHIFT 9 /* IM_FIFOS_ERR_EINT */
-#define WM8915_IM_FIFOS_ERR_EINT_WIDTH 1 /* IM_FIFOS_ERR_EINT */
-#define WM8915_IM_DSP2DRC_SIG_DET_EINT 0x0080 /* IM_DSP2DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP2DRC_SIG_DET_EINT_MASK 0x0080 /* IM_DSP2DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP2DRC_SIG_DET_EINT_SHIFT 7 /* IM_DSP2DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP2DRC_SIG_DET_EINT_WIDTH 1 /* IM_DSP2DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP1DRC_SIG_DET_EINT 0x0040 /* IM_DSP1DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP1DRC_SIG_DET_EINT_MASK 0x0040 /* IM_DSP1DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP1DRC_SIG_DET_EINT_SHIFT 6 /* IM_DSP1DRC_SIG_DET_EINT */
-#define WM8915_IM_DSP1DRC_SIG_DET_EINT_WIDTH 1 /* IM_DSP1DRC_SIG_DET_EINT */
-#define WM8915_IM_FLL_SW_CLK_DONE_EINT 0x0008 /* IM_FLL_SW_CLK_DONE_EINT */
-#define WM8915_IM_FLL_SW_CLK_DONE_EINT_MASK 0x0008 /* IM_FLL_SW_CLK_DONE_EINT */
-#define WM8915_IM_FLL_SW_CLK_DONE_EINT_SHIFT 3 /* IM_FLL_SW_CLK_DONE_EINT */
-#define WM8915_IM_FLL_SW_CLK_DONE_EINT_WIDTH 1 /* IM_FLL_SW_CLK_DONE_EINT */
-#define WM8915_IM_FLL_LOCK_EINT 0x0004 /* IM_FLL_LOCK_EINT */
-#define WM8915_IM_FLL_LOCK_EINT_MASK 0x0004 /* IM_FLL_LOCK_EINT */
-#define WM8915_IM_FLL_LOCK_EINT_SHIFT 2 /* IM_FLL_LOCK_EINT */
-#define WM8915_IM_FLL_LOCK_EINT_WIDTH 1 /* IM_FLL_LOCK_EINT */
-#define WM8915_IM_HP_DONE_EINT 0x0002 /* IM_HP_DONE_EINT */
-#define WM8915_IM_HP_DONE_EINT_MASK 0x0002 /* IM_HP_DONE_EINT */
-#define WM8915_IM_HP_DONE_EINT_SHIFT 1 /* IM_HP_DONE_EINT */
-#define WM8915_IM_HP_DONE_EINT_WIDTH 1 /* IM_HP_DONE_EINT */
-#define WM8915_IM_MICD_EINT 0x0001 /* IM_MICD_EINT */
-#define WM8915_IM_MICD_EINT_MASK 0x0001 /* IM_MICD_EINT */
-#define WM8915_IM_MICD_EINT_SHIFT 0 /* IM_MICD_EINT */
-#define WM8915_IM_MICD_EINT_WIDTH 1 /* IM_MICD_EINT */
-
-/*
- * R1856 (0x740) - Interrupt Control
- */
-#define WM8915_IM_IRQ 0x0001 /* IM_IRQ */
-#define WM8915_IM_IRQ_MASK 0x0001 /* IM_IRQ */
-#define WM8915_IM_IRQ_SHIFT 0 /* IM_IRQ */
-#define WM8915_IM_IRQ_WIDTH 1 /* IM_IRQ */
-
-/*
- * R2048 (0x800) - Left PDM Speaker
- */
-#define WM8915_SPKL_ENA 0x0010 /* SPKL_ENA */
-#define WM8915_SPKL_ENA_MASK 0x0010 /* SPKL_ENA */
-#define WM8915_SPKL_ENA_SHIFT 4 /* SPKL_ENA */
-#define WM8915_SPKL_ENA_WIDTH 1 /* SPKL_ENA */
-#define WM8915_SPKL_MUTE 0x0008 /* SPKL_MUTE */
-#define WM8915_SPKL_MUTE_MASK 0x0008 /* SPKL_MUTE */
-#define WM8915_SPKL_MUTE_SHIFT 3 /* SPKL_MUTE */
-#define WM8915_SPKL_MUTE_WIDTH 1 /* SPKL_MUTE */
-#define WM8915_SPKL_MUTE_ZC 0x0004 /* SPKL_MUTE_ZC */
-#define WM8915_SPKL_MUTE_ZC_MASK 0x0004 /* SPKL_MUTE_ZC */
-#define WM8915_SPKL_MUTE_ZC_SHIFT 2 /* SPKL_MUTE_ZC */
-#define WM8915_SPKL_MUTE_ZC_WIDTH 1 /* SPKL_MUTE_ZC */
-#define WM8915_SPKL_SRC_MASK 0x0003 /* SPKL_SRC - [1:0] */
-#define WM8915_SPKL_SRC_SHIFT 0 /* SPKL_SRC - [1:0] */
-#define WM8915_SPKL_SRC_WIDTH 2 /* SPKL_SRC - [1:0] */
-
-/*
- * R2049 (0x801) - Right PDM Speaker
- */
-#define WM8915_SPKR_ENA 0x0010 /* SPKR_ENA */
-#define WM8915_SPKR_ENA_MASK 0x0010 /* SPKR_ENA */
-#define WM8915_SPKR_ENA_SHIFT 4 /* SPKR_ENA */
-#define WM8915_SPKR_ENA_WIDTH 1 /* SPKR_ENA */
-#define WM8915_SPKR_MUTE 0x0008 /* SPKR_MUTE */
-#define WM8915_SPKR_MUTE_MASK 0x0008 /* SPKR_MUTE */
-#define WM8915_SPKR_MUTE_SHIFT 3 /* SPKR_MUTE */
-#define WM8915_SPKR_MUTE_WIDTH 1 /* SPKR_MUTE */
-#define WM8915_SPKR_MUTE_ZC 0x0004 /* SPKR_MUTE_ZC */
-#define WM8915_SPKR_MUTE_ZC_MASK 0x0004 /* SPKR_MUTE_ZC */
-#define WM8915_SPKR_MUTE_ZC_SHIFT 2 /* SPKR_MUTE_ZC */
-#define WM8915_SPKR_MUTE_ZC_WIDTH 1 /* SPKR_MUTE_ZC */
-#define WM8915_SPKR_SRC_MASK 0x0003 /* SPKR_SRC - [1:0] */
-#define WM8915_SPKR_SRC_SHIFT 0 /* SPKR_SRC - [1:0] */
-#define WM8915_SPKR_SRC_WIDTH 2 /* SPKR_SRC - [1:0] */
-
-/*
- * R2050 (0x802) - PDM Speaker Mute Sequence
- */
-#define WM8915_SPK_MUTE_ENDIAN 0x0100 /* SPK_MUTE_ENDIAN */
-#define WM8915_SPK_MUTE_ENDIAN_MASK 0x0100 /* SPK_MUTE_ENDIAN */
-#define WM8915_SPK_MUTE_ENDIAN_SHIFT 8 /* SPK_MUTE_ENDIAN */
-#define WM8915_SPK_MUTE_ENDIAN_WIDTH 1 /* SPK_MUTE_ENDIAN */
-#define WM8915_SPK_MUTE_SEQ1_MASK 0x00FF /* SPK_MUTE_SEQ1 - [7:0] */
-#define WM8915_SPK_MUTE_SEQ1_SHIFT 0 /* SPK_MUTE_SEQ1 - [7:0] */
-#define WM8915_SPK_MUTE_SEQ1_WIDTH 8 /* SPK_MUTE_SEQ1 - [7:0] */
-
-/*
- * R2051 (0x803) - PDM Speaker Volume
- */
-#define WM8915_SPKR_VOL_MASK 0x00F0 /* SPKR_VOL - [7:4] */
-#define WM8915_SPKR_VOL_SHIFT 4 /* SPKR_VOL - [7:4] */
-#define WM8915_SPKR_VOL_WIDTH 4 /* SPKR_VOL - [7:4] */
-#define WM8915_SPKL_VOL_MASK 0x000F /* SPKL_VOL - [3:0] */
-#define WM8915_SPKL_VOL_SHIFT 0 /* SPKL_VOL - [3:0] */
-#define WM8915_SPKL_VOL_WIDTH 4 /* SPKL_VOL - [3:0] */
-
-#endif
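All of the bit-field definitions removed above follow the same _MASK/_SHIFT/_WIDTH naming convention, so a field is programmed by clearing its mask and OR-ing in the shifted value. A minimal sketch in C, assuming the register-address constant WM8915_PDM_SPEAKER_VOLUME (0x803) from the address block of the same header and the stock ASoC read-modify-write helper snd_soc_update_bits():

	/* Sketch only: write the 4-bit left speaker volume, vol, into R2051.
	 * WM8915_PDM_SPEAKER_VOLUME is an assumed name for the 0x803
	 * register constant; the mask/shift macros are the ones above.
	 */
	snd_soc_update_bits(codec, WM8915_PDM_SPEAKER_VOLUME,
			    WM8915_SPKL_VOL_MASK,
			    (vol << WM8915_SPKL_VOL_SHIFT) & WM8915_SPKL_VOL_MASK);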
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
new file mode 100644
index 0000000..00f9ace
--- /dev/null
+++ b/sound/soc/codecs/wm8996.c
@@ -0,0 +1,2995 @@
+/*
+ * wm8996.c - WM8996 audio codec interface
+ *
+ * Copyright 2011 Wolfson Microelectronics PLC.
+ * Author: Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include "wm8996.h"
+
+#define WM8996_AIFS 2
+
+#define HPOUT1L 1
+#define HPOUT1R 2
+#define HPOUT2L 4
+#define HPOUT2R 8
+
+#define WM8996_NUM_SUPPLIES 4
+static const char *wm8996_supply_names[WM8996_NUM_SUPPLIES] = {
+	"DBVDD",
+	"AVDD1",
+	"AVDD2",
+	"CPVDD",
+};
+
+struct wm8996_priv {
+	struct snd_soc_codec *codec;
+
+	int ldo1ena;
+
+	int sysclk;
+	int sysclk_src;
+
+	int fll_src;
+	int fll_fref;
+	int fll_fout;
+
+	struct completion fll_lock;
+
+	u16 dcs_pending;
+	struct completion dcs_done;
+
+	u16 hpout_ena;
+	u16 hpout_pending;
+
+	struct regulator_bulk_data supplies[WM8996_NUM_SUPPLIES];
+	struct notifier_block disable_nb[WM8996_NUM_SUPPLIES];
+
+	struct wm8996_pdata pdata;
+
+	int rx_rate[WM8996_AIFS];
+	int bclk_rate[WM8996_AIFS];
+
+	/* Platform dependant ReTune mobile configuration */
+	int num_retune_mobile_texts;
+	const char **retune_mobile_texts;
+	int retune_mobile_cfg[2];
+	struct soc_enum retune_mobile_enum;
+
+	struct snd_soc_jack *jack;
+	bool detecting;
+	bool jack_mic;
+	wm8996_polarity_fn polarity_cb;
+
+#ifdef CONFIG_GPIOLIB
+	struct gpio_chip gpio_chip;
+#endif
+};
+
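The supply name table and the supplies[] array in the private data are sized by the same WM8996_NUM_SUPPLIES constant, so they can be handed straight to the regulator bulk API. A probe-time sketch (assumed context: dev is the device being probed; error handling omitted):

	int i;

	for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
		wm8996->supplies[i].supply = wm8996_supply_names[i];

	/* Acquire DBVDD, AVDD1, AVDD2 and CPVDD in one call. */
	regulator_bulk_get(dev, ARRAY_SIZE(wm8996->supplies),
			   wm8996->supplies);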
+ */ +#define WM8996_REGULATOR_EVENT(n) \ +static int wm8996_regulator_event_##n(struct notifier_block *nb, \ + unsigned long event, void *data) \ +{ \ + struct wm8996_priv *wm8996 = container_of(nb, struct wm8996_priv, \ + disable_nb[n]); \ + if (event & REGULATOR_EVENT_DISABLE) { \ + wm8996->codec->cache_sync = 1; \ + } \ + return 0; \ +} + +WM8996_REGULATOR_EVENT(0) +WM8996_REGULATOR_EVENT(1) +WM8996_REGULATOR_EVENT(2) +WM8996_REGULATOR_EVENT(3) + +static const u16 wm8996_reg[WM8996_MAX_REGISTER] = { + [WM8996_SOFTWARE_RESET] = 0x8996, + [WM8996_POWER_MANAGEMENT_7] = 0x10, + [WM8996_DAC1_HPOUT1_VOLUME] = 0x88, + [WM8996_DAC2_HPOUT2_VOLUME] = 0x88, + [WM8996_DAC1_LEFT_VOLUME] = 0x2c0, + [WM8996_DAC1_RIGHT_VOLUME] = 0x2c0, + [WM8996_DAC2_LEFT_VOLUME] = 0x2c0, + [WM8996_DAC2_RIGHT_VOLUME] = 0x2c0, + [WM8996_OUTPUT1_LEFT_VOLUME] = 0x80, + [WM8996_OUTPUT1_RIGHT_VOLUME] = 0x80, + [WM8996_OUTPUT2_LEFT_VOLUME] = 0x80, + [WM8996_OUTPUT2_RIGHT_VOLUME] = 0x80, + [WM8996_MICBIAS_1] = 0x39, + [WM8996_MICBIAS_2] = 0x39, + [WM8996_LDO_1] = 0x3, + [WM8996_LDO_2] = 0x13, + [WM8996_ACCESSORY_DETECT_MODE_1] = 0x4, + [WM8996_HEADPHONE_DETECT_1] = 0x20, + [WM8996_MIC_DETECT_1] = 0x7600, + [WM8996_MIC_DETECT_2] = 0xbf, + [WM8996_CHARGE_PUMP_1] = 0x1f25, + [WM8996_CHARGE_PUMP_2] = 0xab19, + [WM8996_DC_SERVO_5] = 0x2a2a, + [WM8996_CONTROL_INTERFACE_1] = 0x8004, + [WM8996_CLOCKING_1] = 0x10, + [WM8996_AIF_RATE] = 0x83, + [WM8996_FLL_CONTROL_4] = 0x5dc0, + [WM8996_FLL_CONTROL_5] = 0xc84, + [WM8996_FLL_EFS_2] = 0x2, + [WM8996_AIF1_TX_LRCLK_1] = 0x80, + [WM8996_AIF1_TX_LRCLK_2] = 0x8, + [WM8996_AIF1_RX_LRCLK_1] = 0x80, + [WM8996_AIF1TX_DATA_CONFIGURATION_1] = 0x1818, + [WM8996_AIF1RX_DATA_CONFIGURATION] = 0x1818, + [WM8996_AIF1TX_TEST] = 0x7, + [WM8996_AIF2_TX_LRCLK_1] = 0x80, + [WM8996_AIF2_TX_LRCLK_2] = 0x8, + [WM8996_AIF2_RX_LRCLK_1] = 0x80, + [WM8996_AIF2TX_DATA_CONFIGURATION_1] = 0x1818, + [WM8996_AIF2RX_DATA_CONFIGURATION] = 0x1818, + [WM8996_AIF2TX_TEST] = 0x1, + [WM8996_DSP1_TX_LEFT_VOLUME] = 0xc0, + [WM8996_DSP1_TX_RIGHT_VOLUME] = 0xc0, + [WM8996_DSP1_RX_LEFT_VOLUME] = 0xc0, + [WM8996_DSP1_RX_RIGHT_VOLUME] = 0xc0, + [WM8996_DSP1_TX_FILTERS] = 0x2000, + [WM8996_DSP1_RX_FILTERS_1] = 0x200, + [WM8996_DSP1_RX_FILTERS_2] = 0x10, + [WM8996_DSP1_DRC_1] = 0x98, + [WM8996_DSP1_DRC_2] = 0x845, + [WM8996_DSP1_RX_EQ_GAINS_1] = 0x6318, + [WM8996_DSP1_RX_EQ_GAINS_2] = 0x6300, + [WM8996_DSP1_RX_EQ_BAND_1_A] = 0xfca, + [WM8996_DSP1_RX_EQ_BAND_1_B] = 0x400, + [WM8996_DSP1_RX_EQ_BAND_1_PG] = 0xd8, + [WM8996_DSP1_RX_EQ_BAND_2_A] = 0x1eb5, + [WM8996_DSP1_RX_EQ_BAND_2_B] = 0xf145, + [WM8996_DSP1_RX_EQ_BAND_2_C] = 0xb75, + [WM8996_DSP1_RX_EQ_BAND_2_PG] = 0x1c5, + [WM8996_DSP1_RX_EQ_BAND_3_A] = 0x1c58, + [WM8996_DSP1_RX_EQ_BAND_3_B] = 0xf373, + [WM8996_DSP1_RX_EQ_BAND_3_C] = 0xa54, + [WM8996_DSP1_RX_EQ_BAND_3_PG] = 0x558, + [WM8996_DSP1_RX_EQ_BAND_4_A] = 0x168e, + [WM8996_DSP1_RX_EQ_BAND_4_B] = 0xf829, + [WM8996_DSP1_RX_EQ_BAND_4_C] = 0x7ad, + [WM8996_DSP1_RX_EQ_BAND_4_PG] = 0x1103, + [WM8996_DSP1_RX_EQ_BAND_5_A] = 0x564, + [WM8996_DSP1_RX_EQ_BAND_5_B] = 0x559, + [WM8996_DSP1_RX_EQ_BAND_5_PG] = 0x4000, + [WM8996_DSP2_TX_LEFT_VOLUME] = 0xc0, + [WM8996_DSP2_TX_RIGHT_VOLUME] = 0xc0, + [WM8996_DSP2_RX_LEFT_VOLUME] = 0xc0, + [WM8996_DSP2_RX_RIGHT_VOLUME] = 0xc0, + [WM8996_DSP2_TX_FILTERS] = 0x2000, + [WM8996_DSP2_RX_FILTERS_1] = 0x200, + [WM8996_DSP2_RX_FILTERS_2] = 0x10, + [WM8996_DSP2_DRC_1] = 0x98, + [WM8996_DSP2_DRC_2] = 0x845, + [WM8996_DSP2_RX_EQ_GAINS_1] = 0x6318, + [WM8996_DSP2_RX_EQ_GAINS_2] = 0x6300, + 
[WM8996_DSP2_RX_EQ_BAND_1_A] = 0xfca, + [WM8996_DSP2_RX_EQ_BAND_1_B] = 0x400, + [WM8996_DSP2_RX_EQ_BAND_1_PG] = 0xd8, + [WM8996_DSP2_RX_EQ_BAND_2_A] = 0x1eb5, + [WM8996_DSP2_RX_EQ_BAND_2_B] = 0xf145, + [WM8996_DSP2_RX_EQ_BAND_2_C] = 0xb75, + [WM8996_DSP2_RX_EQ_BAND_2_PG] = 0x1c5, + [WM8996_DSP2_RX_EQ_BAND_3_A] = 0x1c58, + [WM8996_DSP2_RX_EQ_BAND_3_B] = 0xf373, + [WM8996_DSP2_RX_EQ_BAND_3_C] = 0xa54, + [WM8996_DSP2_RX_EQ_BAND_3_PG] = 0x558, + [WM8996_DSP2_RX_EQ_BAND_4_A] = 0x168e, + [WM8996_DSP2_RX_EQ_BAND_4_B] = 0xf829, + [WM8996_DSP2_RX_EQ_BAND_4_C] = 0x7ad, + [WM8996_DSP2_RX_EQ_BAND_4_PG] = 0x1103, + [WM8996_DSP2_RX_EQ_BAND_5_A] = 0x564, + [WM8996_DSP2_RX_EQ_BAND_5_B] = 0x559, + [WM8996_DSP2_RX_EQ_BAND_5_PG] = 0x4000, + [WM8996_OVERSAMPLING] = 0xd, + [WM8996_SIDETONE] = 0x1040, + [WM8996_GPIO_1] = 0xa101, + [WM8996_GPIO_2] = 0xa101, + [WM8996_GPIO_3] = 0xa101, + [WM8996_GPIO_4] = 0xa101, + [WM8996_GPIO_5] = 0xa101, + [WM8996_PULL_CONTROL_2] = 0x140, + [WM8996_INTERRUPT_STATUS_1_MASK] = 0x1f, + [WM8996_INTERRUPT_STATUS_2_MASK] = 0x1ecf, + [WM8996_RIGHT_PDM_SPEAKER] = 0x1, + [WM8996_PDM_SPEAKER_MUTE_SEQUENCE] = 0x69, + [WM8996_PDM_SPEAKER_VOLUME] = 0x66, + [WM8996_WRITE_SEQUENCER_0] = 0x1, + [WM8996_WRITE_SEQUENCER_1] = 0x1, + [WM8996_WRITE_SEQUENCER_3] = 0x6, + [WM8996_WRITE_SEQUENCER_4] = 0x40, + [WM8996_WRITE_SEQUENCER_5] = 0x1, + [WM8996_WRITE_SEQUENCER_6] = 0xf, + [WM8996_WRITE_SEQUENCER_7] = 0x6, + [WM8996_WRITE_SEQUENCER_8] = 0x1, + [WM8996_WRITE_SEQUENCER_9] = 0x3, + [WM8996_WRITE_SEQUENCER_10] = 0x104, + [WM8996_WRITE_SEQUENCER_12] = 0x60, + [WM8996_WRITE_SEQUENCER_13] = 0x11, + [WM8996_WRITE_SEQUENCER_14] = 0x401, + [WM8996_WRITE_SEQUENCER_16] = 0x50, + [WM8996_WRITE_SEQUENCER_17] = 0x3, + [WM8996_WRITE_SEQUENCER_18] = 0x100, + [WM8996_WRITE_SEQUENCER_20] = 0x51, + [WM8996_WRITE_SEQUENCER_21] = 0x3, + [WM8996_WRITE_SEQUENCER_22] = 0x104, + [WM8996_WRITE_SEQUENCER_23] = 0xa, + [WM8996_WRITE_SEQUENCER_24] = 0x60, + [WM8996_WRITE_SEQUENCER_25] = 0x3b, + [WM8996_WRITE_SEQUENCER_26] = 0x502, + [WM8996_WRITE_SEQUENCER_27] = 0x100, + [WM8996_WRITE_SEQUENCER_28] = 0x2fff, + [WM8996_WRITE_SEQUENCER_32] = 0x2fff, + [WM8996_WRITE_SEQUENCER_36] = 0x2fff, + [WM8996_WRITE_SEQUENCER_40] = 0x2fff, + [WM8996_WRITE_SEQUENCER_44] = 0x2fff, + [WM8996_WRITE_SEQUENCER_48] = 0x2fff, + [WM8996_WRITE_SEQUENCER_52] = 0x2fff, + [WM8996_WRITE_SEQUENCER_56] = 0x2fff, + [WM8996_WRITE_SEQUENCER_60] = 0x2fff, + [WM8996_WRITE_SEQUENCER_64] = 0x1, + [WM8996_WRITE_SEQUENCER_65] = 0x1, + [WM8996_WRITE_SEQUENCER_67] = 0x6, + [WM8996_WRITE_SEQUENCER_68] = 0x40, + [WM8996_WRITE_SEQUENCER_69] = 0x1, + [WM8996_WRITE_SEQUENCER_70] = 0xf, + [WM8996_WRITE_SEQUENCER_71] = 0x6, + [WM8996_WRITE_SEQUENCER_72] = 0x1, + [WM8996_WRITE_SEQUENCER_73] = 0x3, + [WM8996_WRITE_SEQUENCER_74] = 0x104, + [WM8996_WRITE_SEQUENCER_76] = 0x60, + [WM8996_WRITE_SEQUENCER_77] = 0x11, + [WM8996_WRITE_SEQUENCER_78] = 0x401, + [WM8996_WRITE_SEQUENCER_80] = 0x50, + [WM8996_WRITE_SEQUENCER_81] = 0x3, + [WM8996_WRITE_SEQUENCER_82] = 0x100, + [WM8996_WRITE_SEQUENCER_84] = 0x60, + [WM8996_WRITE_SEQUENCER_85] = 0x3b, + [WM8996_WRITE_SEQUENCER_86] = 0x502, + [WM8996_WRITE_SEQUENCER_87] = 0x100, + [WM8996_WRITE_SEQUENCER_88] = 0x2fff, + [WM8996_WRITE_SEQUENCER_92] = 0x2fff, + [WM8996_WRITE_SEQUENCER_96] = 0x2fff, + [WM8996_WRITE_SEQUENCER_100] = 0x2fff, + [WM8996_WRITE_SEQUENCER_104] = 0x2fff, + [WM8996_WRITE_SEQUENCER_108] = 0x2fff, + [WM8996_WRITE_SEQUENCER_112] = 0x2fff, + [WM8996_WRITE_SEQUENCER_116] = 0x2fff, + [WM8996_WRITE_SEQUENCER_120] = 0x2fff, + 
[WM8996_WRITE_SEQUENCER_124] = 0x2fff, + [WM8996_WRITE_SEQUENCER_128] = 0x1, + [WM8996_WRITE_SEQUENCER_129] = 0x1, + [WM8996_WRITE_SEQUENCER_131] = 0x6, + [WM8996_WRITE_SEQUENCER_132] = 0x40, + [WM8996_WRITE_SEQUENCER_133] = 0x1, + [WM8996_WRITE_SEQUENCER_134] = 0xf, + [WM8996_WRITE_SEQUENCER_135] = 0x6, + [WM8996_WRITE_SEQUENCER_136] = 0x1, + [WM8996_WRITE_SEQUENCER_137] = 0x3, + [WM8996_WRITE_SEQUENCER_138] = 0x106, + [WM8996_WRITE_SEQUENCER_140] = 0x61, + [WM8996_WRITE_SEQUENCER_141] = 0x11, + [WM8996_WRITE_SEQUENCER_142] = 0x401, + [WM8996_WRITE_SEQUENCER_144] = 0x50, + [WM8996_WRITE_SEQUENCER_145] = 0x3, + [WM8996_WRITE_SEQUENCER_146] = 0x102, + [WM8996_WRITE_SEQUENCER_148] = 0x51, + [WM8996_WRITE_SEQUENCER_149] = 0x3, + [WM8996_WRITE_SEQUENCER_150] = 0x106, + [WM8996_WRITE_SEQUENCER_151] = 0xa, + [WM8996_WRITE_SEQUENCER_152] = 0x61, + [WM8996_WRITE_SEQUENCER_153] = 0x3b, + [WM8996_WRITE_SEQUENCER_154] = 0x502, + [WM8996_WRITE_SEQUENCER_155] = 0x100, + [WM8996_WRITE_SEQUENCER_156] = 0x2fff, + [WM8996_WRITE_SEQUENCER_160] = 0x2fff, + [WM8996_WRITE_SEQUENCER_164] = 0x2fff, + [WM8996_WRITE_SEQUENCER_168] = 0x2fff, + [WM8996_WRITE_SEQUENCER_172] = 0x2fff, + [WM8996_WRITE_SEQUENCER_176] = 0x2fff, + [WM8996_WRITE_SEQUENCER_180] = 0x2fff, + [WM8996_WRITE_SEQUENCER_184] = 0x2fff, + [WM8996_WRITE_SEQUENCER_188] = 0x2fff, + [WM8996_WRITE_SEQUENCER_192] = 0x1, + [WM8996_WRITE_SEQUENCER_193] = 0x1, + [WM8996_WRITE_SEQUENCER_195] = 0x6, + [WM8996_WRITE_SEQUENCER_196] = 0x40, + [WM8996_WRITE_SEQUENCER_197] = 0x1, + [WM8996_WRITE_SEQUENCER_198] = 0xf, + [WM8996_WRITE_SEQUENCER_199] = 0x6, + [WM8996_WRITE_SEQUENCER_200] = 0x1, + [WM8996_WRITE_SEQUENCER_201] = 0x3, + [WM8996_WRITE_SEQUENCER_202] = 0x106, + [WM8996_WRITE_SEQUENCER_204] = 0x61, + [WM8996_WRITE_SEQUENCER_205] = 0x11, + [WM8996_WRITE_SEQUENCER_206] = 0x401, + [WM8996_WRITE_SEQUENCER_208] = 0x50, + [WM8996_WRITE_SEQUENCER_209] = 0x3, + [WM8996_WRITE_SEQUENCER_210] = 0x102, + [WM8996_WRITE_SEQUENCER_212] = 0x61, + [WM8996_WRITE_SEQUENCER_213] = 0x3b, + [WM8996_WRITE_SEQUENCER_214] = 0x502, + [WM8996_WRITE_SEQUENCER_215] = 0x100, + [WM8996_WRITE_SEQUENCER_216] = 0x2fff, + [WM8996_WRITE_SEQUENCER_220] = 0x2fff, + [WM8996_WRITE_SEQUENCER_224] = 0x2fff, + [WM8996_WRITE_SEQUENCER_228] = 0x2fff, + [WM8996_WRITE_SEQUENCER_232] = 0x2fff, + [WM8996_WRITE_SEQUENCER_236] = 0x2fff, + [WM8996_WRITE_SEQUENCER_240] = 0x2fff, + [WM8996_WRITE_SEQUENCER_244] = 0x2fff, + [WM8996_WRITE_SEQUENCER_248] = 0x2fff, + [WM8996_WRITE_SEQUENCER_252] = 0x2fff, + [WM8996_WRITE_SEQUENCER_256] = 0x60, + [WM8996_WRITE_SEQUENCER_258] = 0x601, + [WM8996_WRITE_SEQUENCER_260] = 0x50, + [WM8996_WRITE_SEQUENCER_262] = 0x100, + [WM8996_WRITE_SEQUENCER_264] = 0x1, + [WM8996_WRITE_SEQUENCER_266] = 0x104, + [WM8996_WRITE_SEQUENCER_267] = 0x100, + [WM8996_WRITE_SEQUENCER_268] = 0x2fff, + [WM8996_WRITE_SEQUENCER_272] = 0x2fff, + [WM8996_WRITE_SEQUENCER_276] = 0x2fff, + [WM8996_WRITE_SEQUENCER_280] = 0x2fff, + [WM8996_WRITE_SEQUENCER_284] = 0x2fff, + [WM8996_WRITE_SEQUENCER_288] = 0x2fff, + [WM8996_WRITE_SEQUENCER_292] = 0x2fff, + [WM8996_WRITE_SEQUENCER_296] = 0x2fff, + [WM8996_WRITE_SEQUENCER_300] = 0x2fff, + [WM8996_WRITE_SEQUENCER_304] = 0x2fff, + [WM8996_WRITE_SEQUENCER_308] = 0x2fff, + [WM8996_WRITE_SEQUENCER_312] = 0x2fff, + [WM8996_WRITE_SEQUENCER_316] = 0x2fff, + [WM8996_WRITE_SEQUENCER_320] = 0x61, + [WM8996_WRITE_SEQUENCER_322] = 0x601, + [WM8996_WRITE_SEQUENCER_324] = 0x50, + [WM8996_WRITE_SEQUENCER_326] = 0x102, + [WM8996_WRITE_SEQUENCER_328] = 0x1, + 
[WM8996_WRITE_SEQUENCER_330] = 0x106,
+	[WM8996_WRITE_SEQUENCER_331] = 0x100,
+	[WM8996_WRITE_SEQUENCER_332] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_336] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_340] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_344] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_348] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_352] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_356] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_360] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_364] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_368] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_372] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_376] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_380] = 0x2fff,
+	[WM8996_WRITE_SEQUENCER_384] = 0x60,
+	[WM8996_WRITE_SEQUENCER_386] = 0x601,
+	[WM8996_WRITE_SEQUENCER_388] = 0x61,
+	[WM8996_WRITE_SEQUENCER_390] = 0x601,
+	[WM8996_WRITE_SEQUENCER_392] = 0x50,
+	[WM8996_WRITE_SEQUENCER_394] = 0x300,
+	[WM8996_WRITE_SEQUENCER_396] = 0x1,
+	[WM8996_WRITE_SEQUENCER_398] = 0x304,
+	[WM8996_WRITE_SEQUENCER_400] = 0x40,
+	[WM8996_WRITE_SEQUENCER_402] = 0xf,
+	[WM8996_WRITE_SEQUENCER_404] = 0x1,
+	[WM8996_WRITE_SEQUENCER_407] = 0x100,
+};
+
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, 0, 100, 0);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0);
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(out_digital_tlv, -1200, 150, 0);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -900, 75, 0);
+static const DECLARE_TLV_DB_SCALE(spk_tlv, -900, 150, 0);
+static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+
+static const char *sidetone_hpf_text[] = {
+	"2.9kHz", "1.5kHz", "735Hz", "403Hz", "196Hz", "98Hz", "49Hz"
+};
+
+static const struct soc_enum sidetone_hpf =
+	SOC_ENUM_SINGLE(WM8996_SIDETONE, 7, 6, sidetone_hpf_text);
+
+static const char *hpf_mode_text[] = {
+	"HiFi", "Custom", "Voice"
+};
+
+static const struct soc_enum dsp1tx_hpf_mode =
+	SOC_ENUM_SINGLE(WM8996_DSP1_TX_FILTERS, 3, 3, hpf_mode_text);
+
+static const struct soc_enum dsp2tx_hpf_mode =
+	SOC_ENUM_SINGLE(WM8996_DSP2_TX_FILTERS, 3, 3, hpf_mode_text);
+
+static const char *hpf_cutoff_text[] = {
+	"50Hz", "75Hz", "100Hz", "150Hz", "200Hz", "300Hz", "400Hz"
+};
+
+static const struct soc_enum dsp1tx_hpf_cutoff =
+	SOC_ENUM_SINGLE(WM8996_DSP1_TX_FILTERS, 0, 7, hpf_cutoff_text);
+
+static const struct soc_enum dsp2tx_hpf_cutoff =
+	SOC_ENUM_SINGLE(WM8996_DSP2_TX_FILTERS, 0, 7, hpf_cutoff_text);
+
+static void wm8996_set_retune_mobile(struct snd_soc_codec *codec, int block)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	struct wm8996_pdata *pdata = &wm8996->pdata;
+	int base, best, best_val, save, i, cfg, iface;
+
+	if (!wm8996->num_retune_mobile_texts)
+		return;
+
+	switch (block) {
+	case 0:
+		base = WM8996_DSP1_RX_EQ_GAINS_1;
+		if (snd_soc_read(codec, WM8996_POWER_MANAGEMENT_8) &
+		    WM8996_DSP1RX_SRC)
+			iface = 1;
+		else
+			iface = 0;
+		break;
+	case 1:
+		base = WM8996_DSP2_RX_EQ_GAINS_1;
+		if (snd_soc_read(codec, WM8996_POWER_MANAGEMENT_8) &
+		    WM8996_DSP2RX_SRC)
+			iface = 1;
+		else
+			iface = 0;
+		break;
+	default:
+		return;
+	}
+
+	/* Find the version of the currently selected configuration
+	 * with the nearest sample rate.
*/ + cfg = wm8996->retune_mobile_cfg[block]; + best = 0; + best_val = INT_MAX; + for (i = 0; i < pdata->num_retune_mobile_cfgs; i++) { + if (strcmp(pdata->retune_mobile_cfgs[i].name, + wm8996->retune_mobile_texts[cfg]) == 0 && + abs(pdata->retune_mobile_cfgs[i].rate + - wm8996->rx_rate[iface]) < best_val) { + best = i; + best_val = abs(pdata->retune_mobile_cfgs[i].rate + - wm8996->rx_rate[iface]); + } + } + + dev_dbg(codec->dev, "ReTune Mobile %d %s/%dHz for %dHz sample rate\n", + block, + pdata->retune_mobile_cfgs[best].name, + pdata->retune_mobile_cfgs[best].rate, + wm8996->rx_rate[iface]); + + /* The EQ will be disabled while reconfiguring it, remember the + * current configuration. + */ + save = snd_soc_read(codec, base); + save &= WM8996_DSP1RX_EQ_ENA; + + for (i = 0; i < ARRAY_SIZE(pdata->retune_mobile_cfgs[best].regs); i++) + snd_soc_update_bits(codec, base + i, 0xffff, + pdata->retune_mobile_cfgs[best].regs[i]); + + snd_soc_update_bits(codec, base, WM8996_DSP1RX_EQ_ENA, save); +} + +/* Icky as hell but saves code duplication */ +static int wm8996_get_retune_mobile_block(const char *name) +{ + if (strcmp(name, "DSP1 EQ Mode") == 0) + return 0; + if (strcmp(name, "DSP2 EQ Mode") == 0) + return 1; + return -EINVAL; +} + +static int wm8996_put_retune_mobile_enum(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + struct wm8996_pdata *pdata = &wm8996->pdata; + int block = wm8996_get_retune_mobile_block(kcontrol->id.name); + int value = ucontrol->value.integer.value[0]; + + if (block < 0) + return block; + + if (value >= pdata->num_retune_mobile_cfgs) + return -EINVAL; + + wm8996->retune_mobile_cfg[block] = value; + + wm8996_set_retune_mobile(codec, block); + + return 0; +} + +static int wm8996_get_retune_mobile_enum(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int block = wm8996_get_retune_mobile_block(kcontrol->id.name); + + ucontrol->value.enumerated.item[0] = wm8996->retune_mobile_cfg[block]; + + return 0; +} + +static const struct snd_kcontrol_new wm8996_snd_controls[] = { +SOC_DOUBLE_R_TLV("Capture Volume", WM8996_LEFT_LINE_INPUT_VOLUME, + WM8996_RIGHT_LINE_INPUT_VOLUME, 0, 31, 0, inpga_tlv), +SOC_DOUBLE_R("Capture ZC Switch", WM8996_LEFT_LINE_INPUT_VOLUME, + WM8996_RIGHT_LINE_INPUT_VOLUME, 5, 1, 0), + +SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8996_DAC1_MIXER_VOLUMES, + 0, 5, 24, 0, sidetone_tlv), +SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8996_DAC2_MIXER_VOLUMES, + 0, 5, 24, 0, sidetone_tlv), +SOC_SINGLE("Sidetone LPF Switch", WM8996_SIDETONE, 12, 1, 0), +SOC_ENUM("Sidetone HPF Cut-off", sidetone_hpf), +SOC_SINGLE("Sidetone HPF Switch", WM8996_SIDETONE, 6, 1, 0), + +SOC_DOUBLE_R_TLV("DSP1 Capture Volume", WM8996_DSP1_TX_LEFT_VOLUME, + WM8996_DSP1_TX_RIGHT_VOLUME, 1, 96, 0, digital_tlv), +SOC_DOUBLE_R_TLV("DSP2 Capture Volume", WM8996_DSP2_TX_LEFT_VOLUME, + WM8996_DSP2_TX_RIGHT_VOLUME, 1, 96, 0, digital_tlv), + +SOC_SINGLE("DSP1 Capture Notch Filter Switch", WM8996_DSP1_TX_FILTERS, + 13, 1, 0), +SOC_DOUBLE("DSP1 Capture HPF Switch", WM8996_DSP1_TX_FILTERS, 12, 11, 1, 0), +SOC_ENUM("DSP1 Capture HPF Mode", dsp1tx_hpf_mode), +SOC_ENUM("DSP1 Capture HPF Cutoff", dsp1tx_hpf_cutoff), + +SOC_SINGLE("DSP2 Capture Notch Filter Switch", WM8996_DSP2_TX_FILTERS, + 13, 1, 0), +SOC_DOUBLE("DSP2 
Capture HPF Switch", WM8996_DSP2_TX_FILTERS, 12, 11, 1, 0), +SOC_ENUM("DSP2 Capture HPF Mode", dsp2tx_hpf_mode), +SOC_ENUM("DSP2 Capture HPF Cutoff", dsp2tx_hpf_cutoff), + +SOC_DOUBLE_R_TLV("DSP1 Playback Volume", WM8996_DSP1_RX_LEFT_VOLUME, + WM8996_DSP1_RX_RIGHT_VOLUME, 1, 112, 0, digital_tlv), +SOC_SINGLE("DSP1 Playback Switch", WM8996_DSP1_RX_FILTERS_1, 9, 1, 1), + +SOC_DOUBLE_R_TLV("DSP2 Playback Volume", WM8996_DSP2_RX_LEFT_VOLUME, + WM8996_DSP2_RX_RIGHT_VOLUME, 1, 112, 0, digital_tlv), +SOC_SINGLE("DSP2 Playback Switch", WM8996_DSP2_RX_FILTERS_1, 9, 1, 1), + +SOC_DOUBLE_R_TLV("DAC1 Volume", WM8996_DAC1_LEFT_VOLUME, + WM8996_DAC1_RIGHT_VOLUME, 1, 112, 0, digital_tlv), +SOC_DOUBLE_R("DAC1 Switch", WM8996_DAC1_LEFT_VOLUME, + WM8996_DAC1_RIGHT_VOLUME, 9, 1, 1), + +SOC_DOUBLE_R_TLV("DAC2 Volume", WM8996_DAC2_LEFT_VOLUME, + WM8996_DAC2_RIGHT_VOLUME, 1, 112, 0, digital_tlv), +SOC_DOUBLE_R("DAC2 Switch", WM8996_DAC2_LEFT_VOLUME, + WM8996_DAC2_RIGHT_VOLUME, 9, 1, 1), + +SOC_SINGLE("Speaker High Performance Switch", WM8996_OVERSAMPLING, 3, 1, 0), +SOC_SINGLE("DMIC High Performance Switch", WM8996_OVERSAMPLING, 2, 1, 0), +SOC_SINGLE("ADC High Performance Switch", WM8996_OVERSAMPLING, 1, 1, 0), +SOC_SINGLE("DAC High Performance Switch", WM8996_OVERSAMPLING, 0, 1, 0), + +SOC_SINGLE("DAC Soft Mute Switch", WM8996_DAC_SOFTMUTE, 1, 1, 0), +SOC_SINGLE("DAC Slow Soft Mute Switch", WM8996_DAC_SOFTMUTE, 0, 1, 0), + +SOC_DOUBLE_TLV("Digital Output 1 Volume", WM8996_DAC1_HPOUT1_VOLUME, 0, 4, + 8, 0, out_digital_tlv), +SOC_DOUBLE_TLV("Digital Output 2 Volume", WM8996_DAC2_HPOUT2_VOLUME, 0, 4, + 8, 0, out_digital_tlv), + +SOC_DOUBLE_R_TLV("Output 1 Volume", WM8996_OUTPUT1_LEFT_VOLUME, + WM8996_OUTPUT1_RIGHT_VOLUME, 0, 12, 0, out_tlv), +SOC_DOUBLE_R("Output 1 ZC Switch", WM8996_OUTPUT1_LEFT_VOLUME, + WM8996_OUTPUT1_RIGHT_VOLUME, 7, 1, 0), + +SOC_DOUBLE_R_TLV("Output 2 Volume", WM8996_OUTPUT2_LEFT_VOLUME, + WM8996_OUTPUT2_RIGHT_VOLUME, 0, 12, 0, out_tlv), +SOC_DOUBLE_R("Output 2 ZC Switch", WM8996_OUTPUT2_LEFT_VOLUME, + WM8996_OUTPUT2_RIGHT_VOLUME, 7, 1, 0), + +SOC_DOUBLE_TLV("Speaker Volume", WM8996_PDM_SPEAKER_VOLUME, 0, 4, 8, 0, + spk_tlv), +SOC_DOUBLE_R("Speaker Switch", WM8996_LEFT_PDM_SPEAKER, + WM8996_RIGHT_PDM_SPEAKER, 3, 1, 1), +SOC_DOUBLE_R("Speaker ZC Switch", WM8996_LEFT_PDM_SPEAKER, + WM8996_RIGHT_PDM_SPEAKER, 2, 1, 0), + +SOC_SINGLE("DSP1 EQ Switch", WM8996_DSP1_RX_EQ_GAINS_1, 0, 1, 0), +SOC_SINGLE("DSP2 EQ Switch", WM8996_DSP2_RX_EQ_GAINS_1, 0, 1, 0), +}; + +static const struct snd_kcontrol_new wm8996_eq_controls[] = { +SOC_SINGLE_TLV("DSP1 EQ B1 Volume", WM8996_DSP1_RX_EQ_GAINS_1, 11, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP1 EQ B2 Volume", WM8996_DSP1_RX_EQ_GAINS_1, 6, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP1 EQ B3 Volume", WM8996_DSP1_RX_EQ_GAINS_1, 1, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP1 EQ B4 Volume", WM8996_DSP1_RX_EQ_GAINS_2, 11, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP1 EQ B5 Volume", WM8996_DSP1_RX_EQ_GAINS_2, 6, 31, 0, + eq_tlv), + +SOC_SINGLE_TLV("DSP2 EQ B1 Volume", WM8996_DSP2_RX_EQ_GAINS_1, 11, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP2 EQ B2 Volume", WM8996_DSP2_RX_EQ_GAINS_1, 6, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP2 EQ B3 Volume", WM8996_DSP2_RX_EQ_GAINS_1, 1, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP2 EQ B4 Volume", WM8996_DSP2_RX_EQ_GAINS_2, 11, 31, 0, + eq_tlv), +SOC_SINGLE_TLV("DSP2 EQ B5 Volume", WM8996_DSP2_RX_EQ_GAINS_2, 6, 31, 0, + eq_tlv), +}; + +static int cp_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + switch (event) { + case 
SND_SOC_DAPM_POST_PMU:
+		msleep(5);
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int rmv_short_event(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(w->codec);
+
+	/* Record which outputs we enabled */
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMD:
+		wm8996->hpout_pending &= ~w->shift;
+		break;
+	case SND_SOC_DAPM_PRE_PMU:
+		wm8996->hpout_pending |= w->shift;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void wait_for_dc_servo(struct snd_soc_codec *codec, u16 mask)
+{
+	struct i2c_client *i2c = to_i2c_client(codec->dev);
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	/* Bound the polling fallback; "i" was previously used uninitialised */
+	int i = 200, ret;
+	unsigned long timeout = 200;
+
+	snd_soc_write(codec, WM8996_DC_SERVO_2, mask);
+
+	/* Use the interrupt if possible */
+	do {
+		if (i2c->irq) {
+			timeout = wait_for_completion_timeout(&wm8996->dcs_done,
+							      msecs_to_jiffies(200));
+			if (timeout == 0)
+				dev_err(codec->dev, "DC servo timed out\n");
+
+		} else {
+			msleep(1);
+			if (--i == 0) {
+				timeout = 0;
+				break;
+			}
+		}
+
+		ret = snd_soc_read(codec, WM8996_DC_SERVO_2);
+		dev_dbg(codec->dev, "DC servo state: %x\n", ret);
+	} while (ret & mask);
+
+	if (timeout == 0)
+		dev_err(codec->dev, "DC servo timed out for %x\n", mask);
+	else
+		dev_dbg(codec->dev, "DC servo complete for %x\n", mask);
+}
+
+static void wm8996_seq_notifier(struct snd_soc_dapm_context *dapm,
+				enum snd_soc_dapm_type event, int subseq)
+{
+	struct snd_soc_codec *codec = container_of(dapm,
+						   struct snd_soc_codec, dapm);
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	u16 val, mask;
+
+	/* Complete any pending DC servo starts */
+	if (wm8996->dcs_pending) {
+		dev_dbg(codec->dev, "Starting DC servo for %x\n",
+			wm8996->dcs_pending);
+
+		/* Trigger a startup sequence */
+		wait_for_dc_servo(codec, wm8996->dcs_pending
+				  << WM8996_DCS_TRIG_STARTUP_0_SHIFT);
+
+		wm8996->dcs_pending = 0;
+	}
+
+	if (wm8996->hpout_pending != wm8996->hpout_ena) {
+		dev_dbg(codec->dev, "Applying RMV_SHORTs %x->%x\n",
+			wm8996->hpout_ena, wm8996->hpout_pending);
+
+		val = 0;
+		mask = 0;
+		if (wm8996->hpout_pending & HPOUT1L) {
+			val |= WM8996_HPOUT1L_RMV_SHORT;
+			mask |= WM8996_HPOUT1L_RMV_SHORT;
+		} else {
+			mask |= WM8996_HPOUT1L_RMV_SHORT |
+				WM8996_HPOUT1L_OUTP |
+				WM8996_HPOUT1L_DLY;
+		}
+
+		if (wm8996->hpout_pending & HPOUT1R) {
+			val |= WM8996_HPOUT1R_RMV_SHORT;
+			mask |= WM8996_HPOUT1R_RMV_SHORT;
+		} else {
+			mask |= WM8996_HPOUT1R_RMV_SHORT |
+				WM8996_HPOUT1R_OUTP |
+				WM8996_HPOUT1R_DLY;
+		}
+
+		snd_soc_update_bits(codec, WM8996_ANALOGUE_HP_1, mask, val);
+
+		val = 0;
+		mask = 0;
+		if (wm8996->hpout_pending & HPOUT2L) {
+			val |= WM8996_HPOUT2L_RMV_SHORT;
+			mask |= WM8996_HPOUT2L_RMV_SHORT;
+		} else {
+			mask |= WM8996_HPOUT2L_RMV_SHORT |
+				WM8996_HPOUT2L_OUTP |
+				WM8996_HPOUT2L_DLY;
+		}
+
+		if (wm8996->hpout_pending & HPOUT2R) {
+			val |= WM8996_HPOUT2R_RMV_SHORT;
+			mask |= WM8996_HPOUT2R_RMV_SHORT;
+		} else {
+			mask |= WM8996_HPOUT2R_RMV_SHORT |
+				WM8996_HPOUT2R_OUTP |
+				WM8996_HPOUT2R_DLY;
+		}
+
+		snd_soc_update_bits(codec, WM8996_ANALOGUE_HP_2, mask, val);
+
+		wm8996->hpout_ena = wm8996->hpout_pending;
+	}
+}
+
+static int dcs_start(struct snd_soc_dapm_widget *w,
+		     struct snd_kcontrol *kcontrol, int event)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(w->codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		wm8996->dcs_pending |= 1 << w->shift;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	return 0;
+}
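
To see how the staged bring-up above is consumed, here is a minimal sketch of
a machine driver powering the headphone path. It is an illustration only, not
part of the patch: the helper name is hypothetical, while the DAPM calls are
the standard ASoC ones. DAPM walks the PGA -> _DLY -> _DCS -> _OUTP ->
_RMV_SHORT stages in the order given by their SND_SOC_DAPM_PGA_S() sort
numbers, and wm8996_seq_notifier() runs at the end of each sequence to
complete the DC servo and remove the output shorts.

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Hypothetical machine-driver helper: enable the HPOUT1 pins and let
 * DAPM run the codec's power sequence, including the DC servo wait
 * performed in wm8996_seq_notifier(). */
static void example_enable_hpout1(struct snd_soc_dapm_context *dapm)
{
	snd_soc_dapm_enable_pin(dapm, "HPOUT1L");
	snd_soc_dapm_enable_pin(dapm, "HPOUT1R");
	snd_soc_dapm_sync(dapm);
}
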
+ +static const char *sidetone_text[] = { + "IN1", "IN2", +}; + +static const struct soc_enum left_sidetone_enum = + SOC_ENUM_SINGLE(WM8996_SIDETONE, 0, 2, sidetone_text); + +static const struct snd_kcontrol_new left_sidetone = + SOC_DAPM_ENUM("Left Sidetone", left_sidetone_enum); + +static const struct soc_enum right_sidetone_enum = + SOC_ENUM_SINGLE(WM8996_SIDETONE, 1, 2, sidetone_text); + +static const struct snd_kcontrol_new right_sidetone = + SOC_DAPM_ENUM("Right Sidetone", right_sidetone_enum); + +static const char *spk_text[] = { + "DAC1L", "DAC1R", "DAC2L", "DAC2R" +}; + +static const struct soc_enum spkl_enum = + SOC_ENUM_SINGLE(WM8996_LEFT_PDM_SPEAKER, 0, 4, spk_text); + +static const struct snd_kcontrol_new spkl_mux = + SOC_DAPM_ENUM("SPKL", spkl_enum); + +static const struct soc_enum spkr_enum = + SOC_ENUM_SINGLE(WM8996_RIGHT_PDM_SPEAKER, 0, 4, spk_text); + +static const struct snd_kcontrol_new spkr_mux = + SOC_DAPM_ENUM("SPKR", spkr_enum); + +static const char *dsp1rx_text[] = { + "AIF1", "AIF2" +}; + +static const struct soc_enum dsp1rx_enum = + SOC_ENUM_SINGLE(WM8996_POWER_MANAGEMENT_8, 0, 2, dsp1rx_text); + +static const struct snd_kcontrol_new dsp1rx = + SOC_DAPM_ENUM("DSP1RX", dsp1rx_enum); + +static const char *dsp2rx_text[] = { + "AIF2", "AIF1" +}; + +static const struct soc_enum dsp2rx_enum = + SOC_ENUM_SINGLE(WM8996_POWER_MANAGEMENT_8, 4, 2, dsp2rx_text); + +static const struct snd_kcontrol_new dsp2rx = + SOC_DAPM_ENUM("DSP2RX", dsp2rx_enum); + +static const char *aif2tx_text[] = { + "DSP2", "DSP1", "AIF1" +}; + +static const struct soc_enum aif2tx_enum = + SOC_ENUM_SINGLE(WM8996_POWER_MANAGEMENT_8, 6, 3, aif2tx_text); + +static const struct snd_kcontrol_new aif2tx = + SOC_DAPM_ENUM("AIF2TX", aif2tx_enum); + +static const char *inmux_text[] = { + "ADC", "DMIC1", "DMIC2" +}; + +static const struct soc_enum in1_enum = + SOC_ENUM_SINGLE(WM8996_POWER_MANAGEMENT_7, 0, 3, inmux_text); + +static const struct snd_kcontrol_new in1_mux = + SOC_DAPM_ENUM("IN1 Mux", in1_enum); + +static const struct soc_enum in2_enum = + SOC_ENUM_SINGLE(WM8996_POWER_MANAGEMENT_7, 4, 3, inmux_text); + +static const struct snd_kcontrol_new in2_mux = + SOC_DAPM_ENUM("IN2 Mux", in2_enum); + +static const struct snd_kcontrol_new dac2r_mix[] = { +SOC_DAPM_SINGLE("Right Sidetone Switch", WM8996_DAC2_RIGHT_MIXER_ROUTING, + 5, 1, 0), +SOC_DAPM_SINGLE("Left Sidetone Switch", WM8996_DAC2_RIGHT_MIXER_ROUTING, + 4, 1, 0), +SOC_DAPM_SINGLE("DSP2 Switch", WM8996_DAC2_RIGHT_MIXER_ROUTING, 1, 1, 0), +SOC_DAPM_SINGLE("DSP1 Switch", WM8996_DAC2_RIGHT_MIXER_ROUTING, 0, 1, 0), +}; + +static const struct snd_kcontrol_new dac2l_mix[] = { +SOC_DAPM_SINGLE("Right Sidetone Switch", WM8996_DAC2_LEFT_MIXER_ROUTING, + 5, 1, 0), +SOC_DAPM_SINGLE("Left Sidetone Switch", WM8996_DAC2_LEFT_MIXER_ROUTING, + 4, 1, 0), +SOC_DAPM_SINGLE("DSP2 Switch", WM8996_DAC2_LEFT_MIXER_ROUTING, 1, 1, 0), +SOC_DAPM_SINGLE("DSP1 Switch", WM8996_DAC2_LEFT_MIXER_ROUTING, 0, 1, 0), +}; + +static const struct snd_kcontrol_new dac1r_mix[] = { +SOC_DAPM_SINGLE("Right Sidetone Switch", WM8996_DAC1_RIGHT_MIXER_ROUTING, + 5, 1, 0), +SOC_DAPM_SINGLE("Left Sidetone Switch", WM8996_DAC1_RIGHT_MIXER_ROUTING, + 4, 1, 0), +SOC_DAPM_SINGLE("DSP2 Switch", WM8996_DAC1_RIGHT_MIXER_ROUTING, 1, 1, 0), +SOC_DAPM_SINGLE("DSP1 Switch", WM8996_DAC1_RIGHT_MIXER_ROUTING, 0, 1, 0), +}; + +static const struct snd_kcontrol_new dac1l_mix[] = { +SOC_DAPM_SINGLE("Right Sidetone Switch", WM8996_DAC1_LEFT_MIXER_ROUTING, + 5, 1, 0), +SOC_DAPM_SINGLE("Left Sidetone Switch", 
WM8996_DAC1_LEFT_MIXER_ROUTING, + 4, 1, 0), +SOC_DAPM_SINGLE("DSP2 Switch", WM8996_DAC1_LEFT_MIXER_ROUTING, 1, 1, 0), +SOC_DAPM_SINGLE("DSP1 Switch", WM8996_DAC1_LEFT_MIXER_ROUTING, 0, 1, 0), +}; + +static const struct snd_kcontrol_new dsp1txl[] = { +SOC_DAPM_SINGLE("IN1 Switch", WM8996_DSP1_TX_LEFT_MIXER_ROUTING, + 1, 1, 0), +SOC_DAPM_SINGLE("DAC Switch", WM8996_DSP1_TX_LEFT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new dsp1txr[] = { +SOC_DAPM_SINGLE("IN1 Switch", WM8996_DSP1_TX_RIGHT_MIXER_ROUTING, + 1, 1, 0), +SOC_DAPM_SINGLE("DAC Switch", WM8996_DSP1_TX_RIGHT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new dsp2txl[] = { +SOC_DAPM_SINGLE("IN1 Switch", WM8996_DSP2_TX_LEFT_MIXER_ROUTING, + 1, 1, 0), +SOC_DAPM_SINGLE("DAC Switch", WM8996_DSP2_TX_LEFT_MIXER_ROUTING, + 0, 1, 0), +}; + +static const struct snd_kcontrol_new dsp2txr[] = { +SOC_DAPM_SINGLE("IN1 Switch", WM8996_DSP2_TX_RIGHT_MIXER_ROUTING, + 1, 1, 0), +SOC_DAPM_SINGLE("DAC Switch", WM8996_DSP2_TX_RIGHT_MIXER_ROUTING, + 0, 1, 0), +}; + + +static const struct snd_soc_dapm_widget wm8996_dapm_widgets[] = { +SND_SOC_DAPM_INPUT("IN1LN"), +SND_SOC_DAPM_INPUT("IN1LP"), +SND_SOC_DAPM_INPUT("IN1RN"), +SND_SOC_DAPM_INPUT("IN1RP"), + +SND_SOC_DAPM_INPUT("IN2LN"), +SND_SOC_DAPM_INPUT("IN2LP"), +SND_SOC_DAPM_INPUT("IN2RN"), +SND_SOC_DAPM_INPUT("IN2RP"), + +SND_SOC_DAPM_INPUT("DMIC1DAT"), +SND_SOC_DAPM_INPUT("DMIC2DAT"), + +SND_SOC_DAPM_SUPPLY_S("SYSCLK", 1, WM8996_AIF_CLOCKING_1, 0, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY_S("SYSDSPCLK", 2, WM8996_CLOCKING_1, 1, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY_S("AIFCLK", 2, WM8996_CLOCKING_1, 2, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY_S("Charge Pump", 2, WM8996_CHARGE_PUMP_1, 15, 0, cp_event, + SND_SOC_DAPM_POST_PMU), + +SND_SOC_DAPM_SUPPLY("LDO2", WM8996_POWER_MANAGEMENT_2, 1, 0, NULL, 0), +SND_SOC_DAPM_MICBIAS("MICB2", WM8996_POWER_MANAGEMENT_1, 9, 0), +SND_SOC_DAPM_MICBIAS("MICB1", WM8996_POWER_MANAGEMENT_1, 8, 0), + +SND_SOC_DAPM_PGA("IN1L PGA", WM8996_POWER_MANAGEMENT_2, 5, 0, NULL, 0), +SND_SOC_DAPM_PGA("IN1R PGA", WM8996_POWER_MANAGEMENT_2, 4, 0, NULL, 0), + +SND_SOC_DAPM_MUX("IN1L Mux", SND_SOC_NOPM, 0, 0, &in1_mux), +SND_SOC_DAPM_MUX("IN1R Mux", SND_SOC_NOPM, 0, 0, &in1_mux), +SND_SOC_DAPM_MUX("IN2L Mux", SND_SOC_NOPM, 0, 0, &in2_mux), +SND_SOC_DAPM_MUX("IN2R Mux", SND_SOC_NOPM, 0, 0, &in2_mux), + +SND_SOC_DAPM_PGA("IN1L", WM8996_POWER_MANAGEMENT_7, 2, 0, NULL, 0), +SND_SOC_DAPM_PGA("IN1R", WM8996_POWER_MANAGEMENT_7, 3, 0, NULL, 0), +SND_SOC_DAPM_PGA("IN2L", WM8996_POWER_MANAGEMENT_7, 6, 0, NULL, 0), +SND_SOC_DAPM_PGA("IN2R", WM8996_POWER_MANAGEMENT_7, 7, 0, NULL, 0), + +SND_SOC_DAPM_SUPPLY("DMIC2", WM8996_POWER_MANAGEMENT_7, 9, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("DMIC1", WM8996_POWER_MANAGEMENT_7, 8, 0, NULL, 0), + +SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8996_POWER_MANAGEMENT_3, 5, 0), +SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8996_POWER_MANAGEMENT_3, 4, 0), +SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8996_POWER_MANAGEMENT_3, 3, 0), +SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8996_POWER_MANAGEMENT_3, 2, 0), + +SND_SOC_DAPM_ADC("ADCL", NULL, WM8996_POWER_MANAGEMENT_3, 1, 0), +SND_SOC_DAPM_ADC("ADCR", NULL, WM8996_POWER_MANAGEMENT_3, 0, 0), + +SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &left_sidetone), +SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &right_sidetone), + +SND_SOC_DAPM_AIF_IN("DSP2RXL", NULL, 0, WM8996_POWER_MANAGEMENT_3, 11, 0), +SND_SOC_DAPM_AIF_IN("DSP2RXR", NULL, 1, WM8996_POWER_MANAGEMENT_3, 10, 0), +SND_SOC_DAPM_AIF_IN("DSP1RXL", NULL, 0, 
WM8996_POWER_MANAGEMENT_3, 9, 0), +SND_SOC_DAPM_AIF_IN("DSP1RXR", NULL, 1, WM8996_POWER_MANAGEMENT_3, 8, 0), + +SND_SOC_DAPM_MIXER("DSP2TXL", WM8996_POWER_MANAGEMENT_5, 11, 0, + dsp2txl, ARRAY_SIZE(dsp2txl)), +SND_SOC_DAPM_MIXER("DSP2TXR", WM8996_POWER_MANAGEMENT_5, 10, 0, + dsp2txr, ARRAY_SIZE(dsp2txr)), +SND_SOC_DAPM_MIXER("DSP1TXL", WM8996_POWER_MANAGEMENT_5, 9, 0, + dsp1txl, ARRAY_SIZE(dsp1txl)), +SND_SOC_DAPM_MIXER("DSP1TXR", WM8996_POWER_MANAGEMENT_5, 8, 0, + dsp1txr, ARRAY_SIZE(dsp1txr)), + +SND_SOC_DAPM_MIXER("DAC2L Mixer", SND_SOC_NOPM, 0, 0, + dac2l_mix, ARRAY_SIZE(dac2l_mix)), +SND_SOC_DAPM_MIXER("DAC2R Mixer", SND_SOC_NOPM, 0, 0, + dac2r_mix, ARRAY_SIZE(dac2r_mix)), +SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, + dac1l_mix, ARRAY_SIZE(dac1l_mix)), +SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, + dac1r_mix, ARRAY_SIZE(dac1r_mix)), + +SND_SOC_DAPM_DAC("DAC2L", NULL, WM8996_POWER_MANAGEMENT_5, 3, 0), +SND_SOC_DAPM_DAC("DAC2R", NULL, WM8996_POWER_MANAGEMENT_5, 2, 0), +SND_SOC_DAPM_DAC("DAC1L", NULL, WM8996_POWER_MANAGEMENT_5, 1, 0), +SND_SOC_DAPM_DAC("DAC1R", NULL, WM8996_POWER_MANAGEMENT_5, 0, 0), + +SND_SOC_DAPM_AIF_IN("AIF2RX1", "AIF2 Playback", 1, + WM8996_POWER_MANAGEMENT_4, 9, 0), +SND_SOC_DAPM_AIF_IN("AIF2RX0", "AIF2 Playback", 2, + WM8996_POWER_MANAGEMENT_4, 8, 0), + +SND_SOC_DAPM_AIF_IN("AIF2TX1", "AIF2 Capture", 1, + WM8996_POWER_MANAGEMENT_6, 9, 0), +SND_SOC_DAPM_AIF_IN("AIF2TX0", "AIF2 Capture", 2, + WM8996_POWER_MANAGEMENT_6, 8, 0), + +SND_SOC_DAPM_AIF_IN("AIF1RX5", "AIF1 Playback", 5, + WM8996_POWER_MANAGEMENT_4, 5, 0), +SND_SOC_DAPM_AIF_IN("AIF1RX4", "AIF1 Playback", 4, + WM8996_POWER_MANAGEMENT_4, 4, 0), +SND_SOC_DAPM_AIF_IN("AIF1RX3", "AIF1 Playback", 3, + WM8996_POWER_MANAGEMENT_4, 3, 0), +SND_SOC_DAPM_AIF_IN("AIF1RX2", "AIF1 Playback", 2, + WM8996_POWER_MANAGEMENT_4, 2, 0), +SND_SOC_DAPM_AIF_IN("AIF1RX1", "AIF1 Playback", 1, + WM8996_POWER_MANAGEMENT_4, 1, 0), +SND_SOC_DAPM_AIF_IN("AIF1RX0", "AIF1 Playback", 0, + WM8996_POWER_MANAGEMENT_4, 0, 0), + +SND_SOC_DAPM_AIF_OUT("AIF1TX5", "AIF1 Capture", 5, + WM8996_POWER_MANAGEMENT_6, 5, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX4", "AIF1 Capture", 4, + WM8996_POWER_MANAGEMENT_6, 4, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX3", "AIF1 Capture", 3, + WM8996_POWER_MANAGEMENT_6, 3, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX2", "AIF1 Capture", 2, + WM8996_POWER_MANAGEMENT_6, 2, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX1", "AIF1 Capture", 1, + WM8996_POWER_MANAGEMENT_6, 1, 0), +SND_SOC_DAPM_AIF_OUT("AIF1TX0", "AIF1 Capture", 0, + WM8996_POWER_MANAGEMENT_6, 0, 0), + +/* We route as stereo pairs so define some dummy widgets to squash + * things down for now. 
RXA = 0,1, RXB = 2,3 and so on */ +SND_SOC_DAPM_PGA("AIF1RXA", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_PGA("AIF1RXB", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_PGA("AIF1RXC", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_PGA("AIF2RX", SND_SOC_NOPM, 0, 0, NULL, 0), +SND_SOC_DAPM_PGA("DSP2TX", SND_SOC_NOPM, 0, 0, NULL, 0), + +SND_SOC_DAPM_MUX("DSP1RX", SND_SOC_NOPM, 0, 0, &dsp1rx), +SND_SOC_DAPM_MUX("DSP2RX", SND_SOC_NOPM, 0, 0, &dsp2rx), +SND_SOC_DAPM_MUX("AIF2TX", SND_SOC_NOPM, 0, 0, &aif2tx), + +SND_SOC_DAPM_MUX("SPKL", SND_SOC_NOPM, 0, 0, &spkl_mux), +SND_SOC_DAPM_MUX("SPKR", SND_SOC_NOPM, 0, 0, &spkr_mux), +SND_SOC_DAPM_PGA("SPKL PGA", WM8996_LEFT_PDM_SPEAKER, 4, 0, NULL, 0), +SND_SOC_DAPM_PGA("SPKR PGA", WM8996_RIGHT_PDM_SPEAKER, 4, 0, NULL, 0), + +SND_SOC_DAPM_PGA_S("HPOUT2L PGA", 0, WM8996_POWER_MANAGEMENT_1, 7, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT2L_DLY", 1, WM8996_ANALOGUE_HP_2, 5, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT2L_DCS", 2, WM8996_DC_SERVO_1, 2, 0, dcs_start, + SND_SOC_DAPM_POST_PMU), +SND_SOC_DAPM_PGA_S("HPOUT2L_OUTP", 3, WM8996_ANALOGUE_HP_2, 6, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT2L_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT2L, 0, + rmv_short_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + +SND_SOC_DAPM_PGA_S("HPOUT2R PGA", 0, WM8996_POWER_MANAGEMENT_1, 6, 0,NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT2R_DLY", 1, WM8996_ANALOGUE_HP_2, 1, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT2R_DCS", 2, WM8996_DC_SERVO_1, 3, 0, dcs_start, + SND_SOC_DAPM_POST_PMU), +SND_SOC_DAPM_PGA_S("HPOUT2R_OUTP", 3, WM8996_ANALOGUE_HP_2, 2, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT2R_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT2R, 0, + rmv_short_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + +SND_SOC_DAPM_PGA_S("HPOUT1L PGA", 0, WM8996_POWER_MANAGEMENT_1, 5, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT1L_DLY", 1, WM8996_ANALOGUE_HP_1, 5, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT1L_DCS", 2, WM8996_DC_SERVO_1, 0, 0, dcs_start, + SND_SOC_DAPM_POST_PMU), +SND_SOC_DAPM_PGA_S("HPOUT1L_OUTP", 3, WM8996_ANALOGUE_HP_1, 6, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT1L_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT1L, 0, + rmv_short_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + +SND_SOC_DAPM_PGA_S("HPOUT1R PGA", 0, WM8996_POWER_MANAGEMENT_1, 4, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT1R_DLY", 1, WM8996_ANALOGUE_HP_1, 1, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT1R_DCS", 2, WM8996_DC_SERVO_1, 1, 0, dcs_start, + SND_SOC_DAPM_POST_PMU), +SND_SOC_DAPM_PGA_S("HPOUT1R_OUTP", 3, WM8996_ANALOGUE_HP_1, 2, 0, NULL, 0), +SND_SOC_DAPM_PGA_S("HPOUT1R_RMV_SHORT", 3, SND_SOC_NOPM, HPOUT1R, 0, + rmv_short_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + +SND_SOC_DAPM_OUTPUT("HPOUT1L"), +SND_SOC_DAPM_OUTPUT("HPOUT1R"), +SND_SOC_DAPM_OUTPUT("HPOUT2L"), +SND_SOC_DAPM_OUTPUT("HPOUT2R"), +SND_SOC_DAPM_OUTPUT("SPKDAT"), +}; + +static const struct snd_soc_dapm_route wm8996_dapm_routes[] = { + { "AIFCLK", NULL, "SYSCLK" }, + { "SYSDSPCLK", NULL, "SYSCLK" }, + { "Charge Pump", NULL, "SYSCLK" }, + + { "MICB1", NULL, "LDO2" }, + { "MICB2", NULL, "LDO2" }, + + { "IN1L PGA", NULL, "IN2LN" }, + { "IN1L PGA", NULL, "IN2LP" }, + { "IN1L PGA", NULL, "IN1LN" }, + { "IN1L PGA", NULL, "IN1LP" }, + + { "IN1R PGA", NULL, "IN2RN" }, + { "IN1R PGA", NULL, "IN2RP" }, + { "IN1R PGA", NULL, "IN1RN" }, + { "IN1R PGA", NULL, "IN1RP" }, + + { "ADCL", NULL, "IN1L PGA" }, + + { "ADCR", NULL, "IN1R PGA" }, + + { "DMIC1L", NULL, "DMIC1DAT" }, + { "DMIC1R", NULL, "DMIC1DAT" }, + { "DMIC2L", NULL, "DMIC2DAT" }, + { "DMIC2R", NULL, "DMIC2DAT" }, + + { "DMIC2L", NULL, 
"DMIC2" }, + { "DMIC2R", NULL, "DMIC2" }, + { "DMIC1L", NULL, "DMIC1" }, + { "DMIC1R", NULL, "DMIC1" }, + + { "IN1L Mux", "ADC", "ADCL" }, + { "IN1L Mux", "DMIC1", "DMIC1L" }, + { "IN1L Mux", "DMIC2", "DMIC2L" }, + + { "IN1R Mux", "ADC", "ADCR" }, + { "IN1R Mux", "DMIC1", "DMIC1R" }, + { "IN1R Mux", "DMIC2", "DMIC2R" }, + + { "IN2L Mux", "ADC", "ADCL" }, + { "IN2L Mux", "DMIC1", "DMIC1L" }, + { "IN2L Mux", "DMIC2", "DMIC2L" }, + + { "IN2R Mux", "ADC", "ADCR" }, + { "IN2R Mux", "DMIC1", "DMIC1R" }, + { "IN2R Mux", "DMIC2", "DMIC2R" }, + + { "Left Sidetone", "IN1", "IN1L Mux" }, + { "Left Sidetone", "IN2", "IN2L Mux" }, + + { "Right Sidetone", "IN1", "IN1R Mux" }, + { "Right Sidetone", "IN2", "IN2R Mux" }, + + { "DSP1TXL", "IN1 Switch", "IN1L Mux" }, + { "DSP1TXR", "IN1 Switch", "IN1R Mux" }, + + { "DSP2TXL", "IN1 Switch", "IN2L Mux" }, + { "DSP2TXR", "IN1 Switch", "IN2R Mux" }, + + { "AIF1TX0", NULL, "DSP1TXL" }, + { "AIF1TX1", NULL, "DSP1TXR" }, + { "AIF1TX2", NULL, "DSP2TXL" }, + { "AIF1TX3", NULL, "DSP2TXR" }, + { "AIF1TX4", NULL, "AIF2RX0" }, + { "AIF1TX5", NULL, "AIF2RX1" }, + + { "AIF1RX0", NULL, "AIFCLK" }, + { "AIF1RX1", NULL, "AIFCLK" }, + { "AIF1RX2", NULL, "AIFCLK" }, + { "AIF1RX3", NULL, "AIFCLK" }, + { "AIF1RX4", NULL, "AIFCLK" }, + { "AIF1RX5", NULL, "AIFCLK" }, + + { "AIF2RX0", NULL, "AIFCLK" }, + { "AIF2RX1", NULL, "AIFCLK" }, + + { "DSP1RXL", NULL, "SYSDSPCLK" }, + { "DSP1RXR", NULL, "SYSDSPCLK" }, + { "DSP2RXL", NULL, "SYSDSPCLK" }, + { "DSP2RXR", NULL, "SYSDSPCLK" }, + { "DSP1TXL", NULL, "SYSDSPCLK" }, + { "DSP1TXR", NULL, "SYSDSPCLK" }, + { "DSP2TXL", NULL, "SYSDSPCLK" }, + { "DSP2TXR", NULL, "SYSDSPCLK" }, + + { "AIF1RXA", NULL, "AIF1RX0" }, + { "AIF1RXA", NULL, "AIF1RX1" }, + { "AIF1RXB", NULL, "AIF1RX2" }, + { "AIF1RXB", NULL, "AIF1RX3" }, + { "AIF1RXC", NULL, "AIF1RX4" }, + { "AIF1RXC", NULL, "AIF1RX5" }, + + { "AIF2RX", NULL, "AIF2RX0" }, + { "AIF2RX", NULL, "AIF2RX1" }, + + { "AIF2TX", "DSP2", "DSP2TX" }, + { "AIF2TX", "DSP1", "DSP1RX" }, + { "AIF2TX", "AIF1", "AIF1RXC" }, + + { "DSP1RXL", NULL, "DSP1RX" }, + { "DSP1RXR", NULL, "DSP1RX" }, + { "DSP2RXL", NULL, "DSP2RX" }, + { "DSP2RXR", NULL, "DSP2RX" }, + + { "DSP2TX", NULL, "DSP2TXL" }, + { "DSP2TX", NULL, "DSP2TXR" }, + + { "DSP1RX", "AIF1", "AIF1RXA" }, + { "DSP1RX", "AIF2", "AIF2RX" }, + + { "DSP2RX", "AIF1", "AIF1RXB" }, + { "DSP2RX", "AIF2", "AIF2RX" }, + + { "DAC2L Mixer", "DSP2 Switch", "DSP2RXL" }, + { "DAC2L Mixer", "DSP1 Switch", "DSP1RXL" }, + { "DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, + { "DAC2L Mixer", "Left Sidetone Switch", "Left Sidetone" }, + + { "DAC2R Mixer", "DSP2 Switch", "DSP2RXR" }, + { "DAC2R Mixer", "DSP1 Switch", "DSP1RXR" }, + { "DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" }, + { "DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" }, + + { "DAC1L Mixer", "DSP2 Switch", "DSP2RXL" }, + { "DAC1L Mixer", "DSP1 Switch", "DSP1RXL" }, + { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, + { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, + + { "DAC1R Mixer", "DSP2 Switch", "DSP2RXR" }, + { "DAC1R Mixer", "DSP1 Switch", "DSP1RXR" }, + { "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" }, + { "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" }, + + { "DAC1L", NULL, "DAC1L Mixer" }, + { "DAC1R", NULL, "DAC1R Mixer" }, + { "DAC2L", NULL, "DAC2L Mixer" }, + { "DAC2R", NULL, "DAC2R Mixer" }, + + { "HPOUT2L PGA", NULL, "Charge Pump" }, + { "HPOUT2L PGA", NULL, "DAC2L" }, + { "HPOUT2L_DLY", NULL, "HPOUT2L PGA" }, + { "HPOUT2L_DCS", 
NULL, "HPOUT2L_DLY" }, + { "HPOUT2L_OUTP", NULL, "HPOUT2L_DCS" }, + { "HPOUT2L_RMV_SHORT", NULL, "HPOUT2L_OUTP" }, + + { "HPOUT2R PGA", NULL, "Charge Pump" }, + { "HPOUT2R PGA", NULL, "DAC2R" }, + { "HPOUT2R_DLY", NULL, "HPOUT2R PGA" }, + { "HPOUT2R_DCS", NULL, "HPOUT2R_DLY" }, + { "HPOUT2R_OUTP", NULL, "HPOUT2R_DCS" }, + { "HPOUT2R_RMV_SHORT", NULL, "HPOUT2R_OUTP" }, + + { "HPOUT1L PGA", NULL, "Charge Pump" }, + { "HPOUT1L PGA", NULL, "DAC1L" }, + { "HPOUT1L_DLY", NULL, "HPOUT1L PGA" }, + { "HPOUT1L_DCS", NULL, "HPOUT1L_DLY" }, + { "HPOUT1L_OUTP", NULL, "HPOUT1L_DCS" }, + { "HPOUT1L_RMV_SHORT", NULL, "HPOUT1L_OUTP" }, + + { "HPOUT1R PGA", NULL, "Charge Pump" }, + { "HPOUT1R PGA", NULL, "DAC1R" }, + { "HPOUT1R_DLY", NULL, "HPOUT1R PGA" }, + { "HPOUT1R_DCS", NULL, "HPOUT1R_DLY" }, + { "HPOUT1R_OUTP", NULL, "HPOUT1R_DCS" }, + { "HPOUT1R_RMV_SHORT", NULL, "HPOUT1R_OUTP" }, + + { "HPOUT2L", NULL, "HPOUT2L_RMV_SHORT" }, + { "HPOUT2R", NULL, "HPOUT2R_RMV_SHORT" }, + { "HPOUT1L", NULL, "HPOUT1L_RMV_SHORT" }, + { "HPOUT1R", NULL, "HPOUT1R_RMV_SHORT" }, + + { "SPKL", "DAC1L", "DAC1L" }, + { "SPKL", "DAC1R", "DAC1R" }, + { "SPKL", "DAC2L", "DAC2L" }, + { "SPKL", "DAC2R", "DAC2R" }, + + { "SPKR", "DAC1L", "DAC1L" }, + { "SPKR", "DAC1R", "DAC1R" }, + { "SPKR", "DAC2L", "DAC2L" }, + { "SPKR", "DAC2R", "DAC2R" }, + + { "SPKL PGA", NULL, "SPKL" }, + { "SPKR PGA", NULL, "SPKR" }, + + { "SPKDAT", NULL, "SPKL PGA" }, + { "SPKDAT", NULL, "SPKR PGA" }, +}; + +static int wm8996_readable_register(struct snd_soc_codec *codec, + unsigned int reg) +{ + /* Due to the sparseness of the register map the compiler + * output from an explicit switch statement ends up being much + * more efficient than a table. + */ + switch (reg) { + case WM8996_SOFTWARE_RESET: + case WM8996_POWER_MANAGEMENT_1: + case WM8996_POWER_MANAGEMENT_2: + case WM8996_POWER_MANAGEMENT_3: + case WM8996_POWER_MANAGEMENT_4: + case WM8996_POWER_MANAGEMENT_5: + case WM8996_POWER_MANAGEMENT_6: + case WM8996_POWER_MANAGEMENT_7: + case WM8996_POWER_MANAGEMENT_8: + case WM8996_LEFT_LINE_INPUT_VOLUME: + case WM8996_RIGHT_LINE_INPUT_VOLUME: + case WM8996_LINE_INPUT_CONTROL: + case WM8996_DAC1_HPOUT1_VOLUME: + case WM8996_DAC2_HPOUT2_VOLUME: + case WM8996_DAC1_LEFT_VOLUME: + case WM8996_DAC1_RIGHT_VOLUME: + case WM8996_DAC2_LEFT_VOLUME: + case WM8996_DAC2_RIGHT_VOLUME: + case WM8996_OUTPUT1_LEFT_VOLUME: + case WM8996_OUTPUT1_RIGHT_VOLUME: + case WM8996_OUTPUT2_LEFT_VOLUME: + case WM8996_OUTPUT2_RIGHT_VOLUME: + case WM8996_MICBIAS_1: + case WM8996_MICBIAS_2: + case WM8996_LDO_1: + case WM8996_LDO_2: + case WM8996_ACCESSORY_DETECT_MODE_1: + case WM8996_ACCESSORY_DETECT_MODE_2: + case WM8996_HEADPHONE_DETECT_1: + case WM8996_HEADPHONE_DETECT_2: + case WM8996_MIC_DETECT_1: + case WM8996_MIC_DETECT_2: + case WM8996_MIC_DETECT_3: + case WM8996_CHARGE_PUMP_1: + case WM8996_CHARGE_PUMP_2: + case WM8996_DC_SERVO_1: + case WM8996_DC_SERVO_2: + case WM8996_DC_SERVO_3: + case WM8996_DC_SERVO_5: + case WM8996_DC_SERVO_6: + case WM8996_DC_SERVO_7: + case WM8996_DC_SERVO_READBACK_0: + case WM8996_ANALOGUE_HP_1: + case WM8996_ANALOGUE_HP_2: + case WM8996_CHIP_REVISION: + case WM8996_CONTROL_INTERFACE_1: + case WM8996_WRITE_SEQUENCER_CTRL_1: + case WM8996_WRITE_SEQUENCER_CTRL_2: + case WM8996_AIF_CLOCKING_1: + case WM8996_AIF_CLOCKING_2: + case WM8996_CLOCKING_1: + case WM8996_CLOCKING_2: + case WM8996_AIF_RATE: + case WM8996_FLL_CONTROL_1: + case WM8996_FLL_CONTROL_2: + case WM8996_FLL_CONTROL_3: + case WM8996_FLL_CONTROL_4: + case WM8996_FLL_CONTROL_5: + case 
WM8996_FLL_CONTROL_6: + case WM8996_FLL_EFS_1: + case WM8996_FLL_EFS_2: + case WM8996_AIF1_CONTROL: + case WM8996_AIF1_BCLK: + case WM8996_AIF1_TX_LRCLK_1: + case WM8996_AIF1_TX_LRCLK_2: + case WM8996_AIF1_RX_LRCLK_1: + case WM8996_AIF1_RX_LRCLK_2: + case WM8996_AIF1TX_DATA_CONFIGURATION_1: + case WM8996_AIF1TX_DATA_CONFIGURATION_2: + case WM8996_AIF1RX_DATA_CONFIGURATION: + case WM8996_AIF1TX_CHANNEL_0_CONFIGURATION: + case WM8996_AIF1TX_CHANNEL_1_CONFIGURATION: + case WM8996_AIF1TX_CHANNEL_2_CONFIGURATION: + case WM8996_AIF1TX_CHANNEL_3_CONFIGURATION: + case WM8996_AIF1TX_CHANNEL_4_CONFIGURATION: + case WM8996_AIF1TX_CHANNEL_5_CONFIGURATION: + case WM8996_AIF1RX_CHANNEL_0_CONFIGURATION: + case WM8996_AIF1RX_CHANNEL_1_CONFIGURATION: + case WM8996_AIF1RX_CHANNEL_2_CONFIGURATION: + case WM8996_AIF1RX_CHANNEL_3_CONFIGURATION: + case WM8996_AIF1RX_CHANNEL_4_CONFIGURATION: + case WM8996_AIF1RX_CHANNEL_5_CONFIGURATION: + case WM8996_AIF1RX_MONO_CONFIGURATION: + case WM8996_AIF1TX_TEST: + case WM8996_AIF2_CONTROL: + case WM8996_AIF2_BCLK: + case WM8996_AIF2_TX_LRCLK_1: + case WM8996_AIF2_TX_LRCLK_2: + case WM8996_AIF2_RX_LRCLK_1: + case WM8996_AIF2_RX_LRCLK_2: + case WM8996_AIF2TX_DATA_CONFIGURATION_1: + case WM8996_AIF2TX_DATA_CONFIGURATION_2: + case WM8996_AIF2RX_DATA_CONFIGURATION: + case WM8996_AIF2TX_CHANNEL_0_CONFIGURATION: + case WM8996_AIF2TX_CHANNEL_1_CONFIGURATION: + case WM8996_AIF2RX_CHANNEL_0_CONFIGURATION: + case WM8996_AIF2RX_CHANNEL_1_CONFIGURATION: + case WM8996_AIF2RX_MONO_CONFIGURATION: + case WM8996_AIF2TX_TEST: + case WM8996_DSP1_TX_LEFT_VOLUME: + case WM8996_DSP1_TX_RIGHT_VOLUME: + case WM8996_DSP1_RX_LEFT_VOLUME: + case WM8996_DSP1_RX_RIGHT_VOLUME: + case WM8996_DSP1_TX_FILTERS: + case WM8996_DSP1_RX_FILTERS_1: + case WM8996_DSP1_RX_FILTERS_2: + case WM8996_DSP1_DRC_1: + case WM8996_DSP1_DRC_2: + case WM8996_DSP1_DRC_3: + case WM8996_DSP1_DRC_4: + case WM8996_DSP1_DRC_5: + case WM8996_DSP1_RX_EQ_GAINS_1: + case WM8996_DSP1_RX_EQ_GAINS_2: + case WM8996_DSP1_RX_EQ_BAND_1_A: + case WM8996_DSP1_RX_EQ_BAND_1_B: + case WM8996_DSP1_RX_EQ_BAND_1_PG: + case WM8996_DSP1_RX_EQ_BAND_2_A: + case WM8996_DSP1_RX_EQ_BAND_2_B: + case WM8996_DSP1_RX_EQ_BAND_2_C: + case WM8996_DSP1_RX_EQ_BAND_2_PG: + case WM8996_DSP1_RX_EQ_BAND_3_A: + case WM8996_DSP1_RX_EQ_BAND_3_B: + case WM8996_DSP1_RX_EQ_BAND_3_C: + case WM8996_DSP1_RX_EQ_BAND_3_PG: + case WM8996_DSP1_RX_EQ_BAND_4_A: + case WM8996_DSP1_RX_EQ_BAND_4_B: + case WM8996_DSP1_RX_EQ_BAND_4_C: + case WM8996_DSP1_RX_EQ_BAND_4_PG: + case WM8996_DSP1_RX_EQ_BAND_5_A: + case WM8996_DSP1_RX_EQ_BAND_5_B: + case WM8996_DSP1_RX_EQ_BAND_5_PG: + case WM8996_DSP2_TX_LEFT_VOLUME: + case WM8996_DSP2_TX_RIGHT_VOLUME: + case WM8996_DSP2_RX_LEFT_VOLUME: + case WM8996_DSP2_RX_RIGHT_VOLUME: + case WM8996_DSP2_TX_FILTERS: + case WM8996_DSP2_RX_FILTERS_1: + case WM8996_DSP2_RX_FILTERS_2: + case WM8996_DSP2_DRC_1: + case WM8996_DSP2_DRC_2: + case WM8996_DSP2_DRC_3: + case WM8996_DSP2_DRC_4: + case WM8996_DSP2_DRC_5: + case WM8996_DSP2_RX_EQ_GAINS_1: + case WM8996_DSP2_RX_EQ_GAINS_2: + case WM8996_DSP2_RX_EQ_BAND_1_A: + case WM8996_DSP2_RX_EQ_BAND_1_B: + case WM8996_DSP2_RX_EQ_BAND_1_PG: + case WM8996_DSP2_RX_EQ_BAND_2_A: + case WM8996_DSP2_RX_EQ_BAND_2_B: + case WM8996_DSP2_RX_EQ_BAND_2_C: + case WM8996_DSP2_RX_EQ_BAND_2_PG: + case WM8996_DSP2_RX_EQ_BAND_3_A: + case WM8996_DSP2_RX_EQ_BAND_3_B: + case WM8996_DSP2_RX_EQ_BAND_3_C: + case WM8996_DSP2_RX_EQ_BAND_3_PG: + case WM8996_DSP2_RX_EQ_BAND_4_A: + case WM8996_DSP2_RX_EQ_BAND_4_B: + case WM8996_DSP2_RX_EQ_BAND_4_C: 
+ case WM8996_DSP2_RX_EQ_BAND_4_PG: + case WM8996_DSP2_RX_EQ_BAND_5_A: + case WM8996_DSP2_RX_EQ_BAND_5_B: + case WM8996_DSP2_RX_EQ_BAND_5_PG: + case WM8996_DAC1_MIXER_VOLUMES: + case WM8996_DAC1_LEFT_MIXER_ROUTING: + case WM8996_DAC1_RIGHT_MIXER_ROUTING: + case WM8996_DAC2_MIXER_VOLUMES: + case WM8996_DAC2_LEFT_MIXER_ROUTING: + case WM8996_DAC2_RIGHT_MIXER_ROUTING: + case WM8996_DSP1_TX_LEFT_MIXER_ROUTING: + case WM8996_DSP1_TX_RIGHT_MIXER_ROUTING: + case WM8996_DSP2_TX_LEFT_MIXER_ROUTING: + case WM8996_DSP2_TX_RIGHT_MIXER_ROUTING: + case WM8996_DSP_TX_MIXER_SELECT: + case WM8996_DAC_SOFTMUTE: + case WM8996_OVERSAMPLING: + case WM8996_SIDETONE: + case WM8996_GPIO_1: + case WM8996_GPIO_2: + case WM8996_GPIO_3: + case WM8996_GPIO_4: + case WM8996_GPIO_5: + case WM8996_PULL_CONTROL_1: + case WM8996_PULL_CONTROL_2: + case WM8996_INTERRUPT_STATUS_1: + case WM8996_INTERRUPT_STATUS_2: + case WM8996_INTERRUPT_RAW_STATUS_2: + case WM8996_INTERRUPT_STATUS_1_MASK: + case WM8996_INTERRUPT_STATUS_2_MASK: + case WM8996_INTERRUPT_CONTROL: + case WM8996_LEFT_PDM_SPEAKER: + case WM8996_RIGHT_PDM_SPEAKER: + case WM8996_PDM_SPEAKER_MUTE_SEQUENCE: + case WM8996_PDM_SPEAKER_VOLUME: + return 1; + default: + return 0; + } +} + +static int wm8996_volatile_register(struct snd_soc_codec *codec, + unsigned int reg) +{ + switch (reg) { + case WM8996_SOFTWARE_RESET: + case WM8996_CHIP_REVISION: + case WM8996_LDO_1: + case WM8996_LDO_2: + case WM8996_INTERRUPT_STATUS_1: + case WM8996_INTERRUPT_STATUS_2: + case WM8996_INTERRUPT_RAW_STATUS_2: + case WM8996_DC_SERVO_READBACK_0: + case WM8996_DC_SERVO_2: + case WM8996_DC_SERVO_6: + case WM8996_DC_SERVO_7: + case WM8996_FLL_CONTROL_6: + case WM8996_MIC_DETECT_3: + case WM8996_HEADPHONE_DETECT_1: + case WM8996_HEADPHONE_DETECT_2: + return 1; + default: + return 0; + } +} + +static int wm8996_reset(struct snd_soc_codec *codec) +{ + return snd_soc_write(codec, WM8996_SOFTWARE_RESET, 0x8915); +} + +static const int bclk_divs[] = { + 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96 +}; + +static void wm8996_update_bclk(struct snd_soc_codec *codec) +{ + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int aif, best, cur_val, bclk_rate, bclk_reg, i; + + /* Don't bother if we're in a low frequency idle mode that + * can't support audio. 
+ */ + if (wm8996->sysclk < 64000) + return; + + for (aif = 0; aif < WM8996_AIFS; aif++) { + switch (aif) { + case 0: + bclk_reg = WM8996_AIF1_BCLK; + break; + case 1: + bclk_reg = WM8996_AIF2_BCLK; + break; + } + + bclk_rate = wm8996->bclk_rate[aif]; + + /* Pick a divisor for BCLK as close as we can get to ideal */ + best = 0; + for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) { + cur_val = (wm8996->sysclk / bclk_divs[i]) - bclk_rate; + if (cur_val < 0) /* BCLK table is sorted */ + break; + best = i; + } + bclk_rate = wm8996->sysclk / bclk_divs[best]; + dev_dbg(codec->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n", + bclk_divs[best], bclk_rate); + + snd_soc_update_bits(codec, bclk_reg, + WM8996_AIF1_BCLK_DIV_MASK, best); + } +} + +static int wm8996_set_bias_level(struct snd_soc_codec *codec, + enum snd_soc_bias_level level) +{ + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int ret; + + switch (level) { + case SND_SOC_BIAS_ON: + break; + + case SND_SOC_BIAS_PREPARE: + if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) { + snd_soc_update_bits(codec, WM8996_POWER_MANAGEMENT_1, + WM8996_BG_ENA, WM8996_BG_ENA); + msleep(2); + } + break; + + case SND_SOC_BIAS_STANDBY: + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { + ret = regulator_bulk_enable(ARRAY_SIZE(wm8996->supplies), + wm8996->supplies); + if (ret != 0) { + dev_err(codec->dev, + "Failed to enable supplies: %d\n", + ret); + return ret; + } + + if (wm8996->pdata.ldo_ena >= 0) { + gpio_set_value_cansleep(wm8996->pdata.ldo_ena, + 1); + msleep(5); + } + + codec->cache_only = false; + snd_soc_cache_sync(codec); + } + + snd_soc_update_bits(codec, WM8996_POWER_MANAGEMENT_1, + WM8996_BG_ENA, 0); + break; + + case SND_SOC_BIAS_OFF: + codec->cache_only = true; + if (wm8996->pdata.ldo_ena >= 0) + gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0); + regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), + wm8996->supplies); + break; + } + + codec->dapm.bias_level = level; + + return 0; +} + +static int wm8996_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) +{ + struct snd_soc_codec *codec = dai->codec; + int aifctrl = 0; + int bclk = 0; + int lrclk_tx = 0; + int lrclk_rx = 0; + int aifctrl_reg, bclk_reg, lrclk_tx_reg, lrclk_rx_reg; + + switch (dai->id) { + case 0: + aifctrl_reg = WM8996_AIF1_CONTROL; + bclk_reg = WM8996_AIF1_BCLK; + lrclk_tx_reg = WM8996_AIF1_TX_LRCLK_2; + lrclk_rx_reg = WM8996_AIF1_RX_LRCLK_2; + break; + case 1: + aifctrl_reg = WM8996_AIF2_CONTROL; + bclk_reg = WM8996_AIF2_BCLK; + lrclk_tx_reg = WM8996_AIF2_TX_LRCLK_2; + lrclk_rx_reg = WM8996_AIF2_RX_LRCLK_2; + break; + default: + BUG(); + return -EINVAL; + } + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { + case SND_SOC_DAIFMT_NB_NF: + break; + case SND_SOC_DAIFMT_IB_NF: + bclk |= WM8996_AIF1_BCLK_INV; + break; + case SND_SOC_DAIFMT_NB_IF: + lrclk_tx |= WM8996_AIF1TX_LRCLK_INV; + lrclk_rx |= WM8996_AIF1RX_LRCLK_INV; + break; + case SND_SOC_DAIFMT_IB_IF: + bclk |= WM8996_AIF1_BCLK_INV; + lrclk_tx |= WM8996_AIF1TX_LRCLK_INV; + lrclk_rx |= WM8996_AIF1RX_LRCLK_INV; + break; + } + + switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { + case SND_SOC_DAIFMT_CBS_CFS: + break; + case SND_SOC_DAIFMT_CBS_CFM: + lrclk_tx |= WM8996_AIF1TX_LRCLK_MSTR; + lrclk_rx |= WM8996_AIF1RX_LRCLK_MSTR; + break; + case SND_SOC_DAIFMT_CBM_CFS: + bclk |= WM8996_AIF1_BCLK_MSTR; + break; + case SND_SOC_DAIFMT_CBM_CFM: + bclk |= WM8996_AIF1_BCLK_MSTR; + lrclk_tx |= WM8996_AIF1TX_LRCLK_MSTR; + lrclk_rx |= WM8996_AIF1RX_LRCLK_MSTR; + break; + default: + return -EINVAL; + } + + switch (fmt & 
SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_A: + break; + case SND_SOC_DAIFMT_DSP_B: + aifctrl |= 1; + break; + case SND_SOC_DAIFMT_I2S: + aifctrl |= 2; + break; + case SND_SOC_DAIFMT_LEFT_J: + aifctrl |= 3; + break; + default: + return -EINVAL; + } + + snd_soc_update_bits(codec, aifctrl_reg, WM8996_AIF1_FMT_MASK, aifctrl); + snd_soc_update_bits(codec, bclk_reg, + WM8996_AIF1_BCLK_INV | WM8996_AIF1_BCLK_MSTR, + bclk); + snd_soc_update_bits(codec, lrclk_tx_reg, + WM8996_AIF1TX_LRCLK_INV | + WM8996_AIF1TX_LRCLK_MSTR, + lrclk_tx); + snd_soc_update_bits(codec, lrclk_rx_reg, + WM8996_AIF1RX_LRCLK_INV | + WM8996_AIF1RX_LRCLK_MSTR, + lrclk_rx); + + return 0; +} + +static const int dsp_divs[] = { + 48000, 32000, 16000, 8000 +}; + +static int wm8996_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct snd_soc_codec *codec = dai->codec; + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int bits, i, bclk_rate; + int aifdata = 0; + int lrclk = 0; + int dsp = 0; + int aifdata_reg, lrclk_reg, dsp_shift; + + switch (dai->id) { + case 0: + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || + (snd_soc_read(codec, WM8996_GPIO_1)) & WM8996_GP1_FN_MASK) { + aifdata_reg = WM8996_AIF1RX_DATA_CONFIGURATION; + lrclk_reg = WM8996_AIF1_RX_LRCLK_1; + } else { + aifdata_reg = WM8996_AIF1TX_DATA_CONFIGURATION_1; + lrclk_reg = WM8996_AIF1_TX_LRCLK_1; + } + dsp_shift = 0; + break; + case 1: + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || + (snd_soc_read(codec, WM8996_GPIO_2)) & WM8996_GP2_FN_MASK) { + aifdata_reg = WM8996_AIF2RX_DATA_CONFIGURATION; + lrclk_reg = WM8996_AIF2_RX_LRCLK_1; + } else { + aifdata_reg = WM8996_AIF2TX_DATA_CONFIGURATION_1; + lrclk_reg = WM8996_AIF2_TX_LRCLK_1; + } + dsp_shift = WM8996_DSP2_DIV_SHIFT; + break; + default: + BUG(); + return -EINVAL; + } + + bclk_rate = snd_soc_params_to_bclk(params); + if (bclk_rate < 0) { + dev_err(codec->dev, "Unsupported BCLK rate: %d\n", bclk_rate); + return bclk_rate; + } + + wm8996->bclk_rate[dai->id] = bclk_rate; + wm8996->rx_rate[dai->id] = params_rate(params); + + /* Needs looking at for TDM */ + bits = snd_pcm_format_width(params_format(params)); + if (bits < 0) + return bits; + aifdata |= (bits << WM8996_AIF1TX_WL_SHIFT) | bits; + + for (i = 0; i < ARRAY_SIZE(dsp_divs); i++) { + if (dsp_divs[i] == params_rate(params)) + break; + } + if (i == ARRAY_SIZE(dsp_divs)) { + dev_err(codec->dev, "Unsupported sample rate %dHz\n", + params_rate(params)); + return -EINVAL; + } + dsp |= i << dsp_shift; + + wm8996_update_bclk(codec); + + lrclk = bclk_rate / params_rate(params); + dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n", + lrclk, bclk_rate / lrclk); + + snd_soc_update_bits(codec, aifdata_reg, + WM8996_AIF1TX_WL_MASK | + WM8996_AIF1TX_SLOT_LEN_MASK, + aifdata); + snd_soc_update_bits(codec, lrclk_reg, WM8996_AIF1RX_RATE_MASK, + lrclk); + snd_soc_update_bits(codec, WM8996_AIF_CLOCKING_2, + WM8996_DSP1_DIV_SHIFT << dsp_shift, dsp); + + return 0; +} + +static int wm8996_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct snd_soc_codec *codec = dai->codec; + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int lfclk = 0; + int ratediv = 0; + int src; + int old; + + if (freq == wm8996->sysclk && clk_id == wm8996->sysclk_src) + return 0; + + /* Disable SYSCLK while we reconfigure */ + old = snd_soc_read(codec, WM8996_AIF_CLOCKING_1) & WM8996_SYSCLK_ENA; + snd_soc_update_bits(codec, 
WM8996_AIF_CLOCKING_1,
+			    WM8996_SYSCLK_ENA, 0);
+
+	switch (clk_id) {
+	case WM8996_SYSCLK_MCLK1:
+		wm8996->sysclk = freq;
+		src = 0;
+		break;
+	case WM8996_SYSCLK_MCLK2:
+		wm8996->sysclk = freq;
+		src = 1;
+		break;
+	case WM8996_SYSCLK_FLL:
+		wm8996->sysclk = freq;
+		src = 2;
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported clock source %d\n", clk_id);
+		return -EINVAL;
+	}
+
+	switch (wm8996->sysclk) {
+	case 6144000:
+		snd_soc_update_bits(codec, WM8996_AIF_RATE,
+				    WM8996_SYSCLK_RATE, 0);
+		break;
+	case 24576000:
+		ratediv = WM8996_SYSCLK_DIV;
+		/* Fall through */
+	case 12288000:
+		snd_soc_update_bits(codec, WM8996_AIF_RATE,
+				    WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
+		break;
+	case 32000:
+	case 32768:
+		lfclk = WM8996_LFCLK_ENA;
+		break;
+	default:
+		dev_warn(codec->dev, "Unsupported clock rate %dHz\n",
+			 wm8996->sysclk);
+		return -EINVAL;
+	}
+
+	wm8996_update_bclk(codec);
+
+	snd_soc_update_bits(codec, WM8996_AIF_CLOCKING_1,
+			    WM8996_SYSCLK_SRC_MASK | WM8996_SYSCLK_DIV_MASK,
+			    src << WM8996_SYSCLK_SRC_SHIFT | ratediv);
+	snd_soc_update_bits(codec, WM8996_CLOCKING_1, WM8996_LFCLK_ENA, lfclk);
+	snd_soc_update_bits(codec, WM8996_AIF_CLOCKING_1,
+			    WM8996_SYSCLK_ENA, old);
+
+	wm8996->sysclk_src = clk_id;
+
+	return 0;
+}
+
+struct _fll_div {
+	u16 fll_fratio;
+	u16 fll_outdiv;
+	u16 fll_refclk_div;
+	u16 fll_loop_gain;
+	u16 fll_ref_freq;
+	u16 n;
+	u16 theta;
+	u16 lambda;
+};
+
+static struct {
+	unsigned int min;
+	unsigned int max;
+	u16 fll_fratio;
+	int ratio;
+} fll_fratios[] = {
+	{ 0, 64000, 4, 16 },
+	{ 64000, 128000, 3, 8 },
+	{ 128000, 256000, 2, 4 },
+	{ 256000, 1000000, 1, 2 },
+	{ 1000000, 13500000, 0, 1 },
+};
+
+static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
+		       unsigned int Fout)
+{
+	unsigned int target;
+	unsigned int div;
+	unsigned int fratio, gcd_fll;
+	int i;
+
+	/* Fref must be <=13.5MHz */
+	div = 1;
+	fll_div->fll_refclk_div = 0;
+	while ((Fref / div) > 13500000) {
+		div *= 2;
+		fll_div->fll_refclk_div++;
+
+		if (div > 8) {
+			pr_err("Can't scale %dHz input down to <=13.5MHz\n",
+			       Fref);
+			return -EINVAL;
+		}
+	}
+
+	pr_debug("FLL Fref=%u Fout=%u\n", Fref, Fout);
+
+	/* Apply the division for our remaining calculations */
+	Fref /= div;
+
+	if (Fref >= 3000000)
+		fll_div->fll_loop_gain = 5;
+	else
+		fll_div->fll_loop_gain = 0;
+
+	if (Fref >= 48000)
+		fll_div->fll_ref_freq = 0;
+	else
+		fll_div->fll_ref_freq = 1;
+
+	/* Fvco should be 90-100MHz; don't check the upper bound */
+	div = 2;
+	while (Fout * div < 90000000) {
+		div++;
+		if (div > 64) {
+			pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
+			       Fout);
+			return -EINVAL;
+		}
+	}
+	target = Fout * div;
+	fll_div->fll_outdiv = div - 1;
+
+	pr_debug("FLL Fvco=%dHz\n", target);
+
+	/* Find an appropriate FLL_FRATIO and factor it out of the target */
+	for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
+		if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
+			fll_div->fll_fratio = fll_fratios[i].fll_fratio;
+			fratio = fll_fratios[i].ratio;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(fll_fratios)) {
+		pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
+		return -EINVAL;
+	}
+
+	fll_div->n = target / (fratio * Fref);
+
+	if (target % Fref == 0) {
+		fll_div->theta = 0;
+		fll_div->lambda = 0;
+	} else {
+		gcd_fll = gcd(target, fratio * Fref);
+
+		fll_div->theta = (target - (fll_div->n * fratio * Fref))
+			/ gcd_fll;
+		fll_div->lambda = (fratio * Fref) / gcd_fll;
+	}
+
+	pr_debug("FLL N=%x THETA=%x LAMBDA=%x\n",
+		 fll_div->n, fll_div->theta, fll_div->lambda);
+	pr_debug("FLL_FRATIO=%x FLL_OUTDIV=%x FLL_REFCLK_DIV=%x\n",
+		 fll_div->fll_fratio, fll_div->fll_outdiv,
+		 fll_div->fll_refclk_div);
+
+	return 0;
+}
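/*
 * For concreteness, a worked pass through the fll_factors() arithmetic
 * above as a standalone sketch; the input values (Fref = 12MHz from MCLK,
 * Fout = 12.288MHz) are hypothetical and this is an illustration, not
 * part of the patch.
 */
#include <stdio.h>

static unsigned int gcd_u(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned int fref = 12000000, fout = 12288000;
	unsigned int div = 2, target, n, theta, lambda, g;

	/* Smallest divider >= 2 that lifts Fvco to at least 90MHz */
	while (fout * div < 90000000)
		div++;				/* div = 8, so FLL_OUTDIV = 7 */
	target = fout * div;			/* Fvco = 98304000 */

	/* 12MHz sits in the 1MHz-13.5MHz FRATIO band, so ratio = 1 */
	n = target / fref;			/* integer part: 8 */
	g = gcd_u(target, fref);		/* 96000 */
	theta = (target - n * fref) / g;	/* 24 */
	lambda = fref / g;			/* 125 */

	/* Prints N=8 THETA=24 LAMBDA=125: Fvco = (8 + 24/125) * 12MHz */
	printf("N=%u THETA=%u LAMBDA=%u\n", n, theta, lambda);
	return 0;
}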
+
+static int wm8996_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
+			  unsigned int Fref, unsigned int Fout)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	struct i2c_client *i2c = to_i2c_client(codec->dev);
+	struct _fll_div fll_div;
+	unsigned long timeout;
+	int ret, reg;
+
+	/* Any change? */
+	if (source == wm8996->fll_src && Fref == wm8996->fll_fref &&
+	    Fout == wm8996->fll_fout)
+		return 0;
+
+	if (Fout == 0) {
+		dev_dbg(codec->dev, "FLL disabled\n");
+
+		wm8996->fll_fref = 0;
+		wm8996->fll_fout = 0;
+
+		snd_soc_update_bits(codec, WM8996_FLL_CONTROL_1,
+				    WM8996_FLL_ENA, 0);
+
+		return 0;
+	}
+
+	ret = fll_factors(&fll_div, Fref, Fout);
+	if (ret != 0)
+		return ret;
+
+	switch (source) {
+	case WM8996_FLL_MCLK1:
+		reg = 0;
+		break;
+	case WM8996_FLL_MCLK2:
+		reg = 1;
+		break;
+	case WM8996_FLL_DACLRCLK1:
+		reg = 2;
+		break;
+	case WM8996_FLL_BCLK1:
+		reg = 3;
+		break;
+	default:
+		dev_err(codec->dev, "Unknown FLL source %d\n", source);
+		return -EINVAL;
+	}
+
+	reg |= fll_div.fll_refclk_div << WM8996_FLL_REFCLK_DIV_SHIFT;
+	reg |= fll_div.fll_ref_freq << WM8996_FLL_REF_FREQ_SHIFT;
+
+	snd_soc_update_bits(codec, WM8996_FLL_CONTROL_5,
+			    WM8996_FLL_REFCLK_DIV_MASK | WM8996_FLL_REF_FREQ |
+			    WM8996_FLL_REFCLK_SRC_MASK, reg);
+
+	reg = 0;
+	if (fll_div.theta || fll_div.lambda)
+		reg |= WM8996_FLL_EFS_ENA | (3 << WM8996_FLL_LFSR_SEL_SHIFT);
+	else
+		reg |= 1 << WM8996_FLL_LFSR_SEL_SHIFT;
+	snd_soc_write(codec, WM8996_FLL_EFS_2, reg);
+
+	snd_soc_update_bits(codec, WM8996_FLL_CONTROL_2,
+			    WM8996_FLL_OUTDIV_MASK |
+			    WM8996_FLL_FRATIO_MASK,
+			    (fll_div.fll_outdiv << WM8996_FLL_OUTDIV_SHIFT) |
+			    (fll_div.fll_fratio));
+
+	snd_soc_write(codec, WM8996_FLL_CONTROL_3, fll_div.theta);
+
+	snd_soc_update_bits(codec, WM8996_FLL_CONTROL_4,
+			    WM8996_FLL_N_MASK | WM8996_FLL_LOOP_GAIN_MASK,
+			    (fll_div.n << WM8996_FLL_N_SHIFT) |
+			    fll_div.fll_loop_gain);
+
+	snd_soc_write(codec, WM8996_FLL_EFS_1, fll_div.lambda);
+
+	snd_soc_update_bits(codec, WM8996_FLL_CONTROL_1,
+			    WM8996_FLL_ENA, WM8996_FLL_ENA);
+
+	/* The FLL supports live reconfiguration - kick that in case we were
+	 * already enabled.
+ */ + snd_soc_write(codec, WM8996_FLL_CONTROL_6, WM8996_FLL_SWITCH_CLK); + + /* Wait for the FLL to lock, using the interrupt if possible */ + if (Fref > 1000000) + timeout = usecs_to_jiffies(300); + else + timeout = msecs_to_jiffies(2); + + /* Allow substantially longer if we've actually got the IRQ */ + if (i2c->irq) + timeout *= 1000; + + ret = wait_for_completion_timeout(&wm8996->fll_lock, timeout); + + if (ret == 0 && i2c->irq) { + dev_err(codec->dev, "Timed out waiting for FLL\n"); + ret = -ETIMEDOUT; + } else { + ret = 0; + } + + dev_dbg(codec->dev, "FLL configured for %dHz->%dHz\n", Fref, Fout); + + wm8996->fll_fref = Fref; + wm8996->fll_fout = Fout; + wm8996->fll_src = source; + + return ret; +} + +#ifdef CONFIG_GPIOLIB +static inline struct wm8996_priv *gpio_to_wm8996(struct gpio_chip *chip) +{ + return container_of(chip, struct wm8996_priv, gpio_chip); +} + +static void wm8996_gpio_set(struct gpio_chip *chip, unsigned offset, int value) +{ + struct wm8996_priv *wm8996 = gpio_to_wm8996(chip); + struct snd_soc_codec *codec = wm8996->codec; + + snd_soc_update_bits(codec, WM8996_GPIO_1 + offset, + WM8996_GP1_LVL, !!value << WM8996_GP1_LVL_SHIFT); +} + +static int wm8996_gpio_direction_out(struct gpio_chip *chip, + unsigned offset, int value) +{ + struct wm8996_priv *wm8996 = gpio_to_wm8996(chip); + struct snd_soc_codec *codec = wm8996->codec; + int val; + + val = (1 << WM8996_GP1_FN_SHIFT) | (!!value << WM8996_GP1_LVL_SHIFT); + + return snd_soc_update_bits(codec, WM8996_GPIO_1 + offset, + WM8996_GP1_FN_MASK | WM8996_GP1_DIR | + WM8996_GP1_LVL, val); +} + +static int wm8996_gpio_get(struct gpio_chip *chip, unsigned offset) +{ + struct wm8996_priv *wm8996 = gpio_to_wm8996(chip); + struct snd_soc_codec *codec = wm8996->codec; + int ret; + + ret = snd_soc_read(codec, WM8996_GPIO_1 + offset); + if (ret < 0) + return ret; + + return (ret & WM8996_GP1_LVL) != 0; +} + +static int wm8996_gpio_direction_in(struct gpio_chip *chip, unsigned offset) +{ + struct wm8996_priv *wm8996 = gpio_to_wm8996(chip); + struct snd_soc_codec *codec = wm8996->codec; + + return snd_soc_update_bits(codec, WM8996_GPIO_1 + offset, + WM8996_GP1_FN_MASK | WM8996_GP1_DIR, + (1 << WM8996_GP1_FN_SHIFT) | + (1 << WM8996_GP1_DIR_SHIFT)); +} + +static struct gpio_chip wm8996_template_chip = { + .label = "wm8996", + .owner = THIS_MODULE, + .direction_output = wm8996_gpio_direction_out, + .set = wm8996_gpio_set, + .direction_input = wm8996_gpio_direction_in, + .get = wm8996_gpio_get, + .can_sleep = 1, +}; + +static void wm8996_init_gpio(struct snd_soc_codec *codec) +{ + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int ret; + + wm8996->gpio_chip = wm8996_template_chip; + wm8996->gpio_chip.ngpio = 5; + wm8996->gpio_chip.dev = codec->dev; + + if (wm8996->pdata.gpio_base) + wm8996->gpio_chip.base = wm8996->pdata.gpio_base; + else + wm8996->gpio_chip.base = -1; + + ret = gpiochip_add(&wm8996->gpio_chip); + if (ret != 0) + dev_err(codec->dev, "Failed to add GPIOs: %d\n", ret); +} + +static void wm8996_free_gpio(struct snd_soc_codec *codec) +{ + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + int ret; + + ret = gpiochip_remove(&wm8996->gpio_chip); + if (ret != 0) + dev_err(codec->dev, "Failed to remove GPIOs: %d\n", ret); +} +#else +static void wm8996_init_gpio(struct snd_soc_codec *codec) +{ +} + +static void wm8996_free_gpio(struct snd_soc_codec *codec) +{ +} +#endif + +/** + * wm8996_detect - Enable default WM8996 jack detection + * + * The WM8996 has advanced accessory detection support for 
headsets.
+ * This function provides a default implementation which integrates
+ * the majority of this functionality with minimal user configuration.
+ *
+ * This will detect headset, headphone and short circuit button and
+ * will also detect inverted microphone ground connections and update
+ * the polarity of the connections.
+ */
+int wm8996_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		  wm8996_polarity_fn polarity_cb)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+
+	wm8996->jack = jack;
+	wm8996->detecting = true;
+	wm8996->polarity_cb = polarity_cb;
+
+	if (wm8996->polarity_cb)
+		wm8996->polarity_cb(codec, 0);
+
+	/* Clear discharge to avoid noise during detection */
+	snd_soc_update_bits(codec, WM8996_MICBIAS_1,
+			    WM8996_MICB1_DISCH, 0);
+	snd_soc_update_bits(codec, WM8996_MICBIAS_2,
+			    WM8996_MICB2_DISCH, 0);
+
+	/* LDO2 powers the microphones, SYSCLK clocks detection */
+	snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
+	snd_soc_dapm_force_enable_pin(&codec->dapm, "SYSCLK");
+
+	/* We start off just enabling microphone detection - even a
+	 * plain headphone will trigger detection.
+	 */
+	snd_soc_update_bits(codec, WM8996_MIC_DETECT_1,
+			    WM8996_MICD_ENA, WM8996_MICD_ENA);
+
+	/* Slowest detection rate, gives debounce for initial detection */
+	snd_soc_update_bits(codec, WM8996_MIC_DETECT_1,
+			    WM8996_MICD_RATE_MASK,
+			    WM8996_MICD_RATE_MASK);
+
+	/* Enable interrupts and we're off */
+	snd_soc_update_bits(codec, WM8996_INTERRUPT_STATUS_2_MASK,
+			    WM8996_IM_MICD_EINT, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8996_detect);
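/*
 * Usage sketch: a machine driver would typically register a jack and hand
 * it to wm8996_detect() from its DAI link init callback.  The names below
 * (board_headset, board_init) are hypothetical, the jack API shown is the
 * snd_soc_jack_new() form of this kernel era, and this is an illustration
 * rather than part of the patch.
 */
static struct snd_soc_jack board_headset;

static int board_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	int ret;

	ret = snd_soc_jack_new(codec, "Headset",
			       SND_JACK_HEADSET | SND_JACK_BTN_0,
			       &board_headset);
	if (ret != 0)
		return ret;

	/* No polarity callback in this sketch */
	return wm8996_detect(codec, &board_headset, NULL);
}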
+
+static void wm8996_micd(struct snd_soc_codec *codec)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	int val, reg;
+
+	val = snd_soc_read(codec, WM8996_MIC_DETECT_3);
+
+	dev_dbg(codec->dev, "Microphone event: %x\n", val);
+
+	if (!(val & WM8996_MICD_VALID)) {
+		dev_warn(codec->dev, "Microphone detection state invalid\n");
+		return;
+	}
+
+	/* No accessory, reset everything and report removal */
+	if (!(val & WM8996_MICD_STS)) {
+		dev_dbg(codec->dev, "Jack removal detected\n");
+		wm8996->jack_mic = false;
+		wm8996->detecting = true;
+		snd_soc_jack_report(wm8996->jack, 0,
+				    SND_JACK_HEADSET | SND_JACK_BTN_0);
+		snd_soc_update_bits(codec, WM8996_MIC_DETECT_1,
+				    WM8996_MICD_RATE_MASK,
+				    WM8996_MICD_RATE_MASK);
+		return;
+	}
+
+	/* If the measurement is very high we've got a microphone but
+	 * do a little debounce to account for mechanical issues.
+	 */
+	if (val & 0x400) {
+		dev_dbg(codec->dev, "Microphone detected\n");
+		snd_soc_jack_report(wm8996->jack, SND_JACK_HEADSET,
+				    SND_JACK_HEADSET | SND_JACK_BTN_0);
+		wm8996->jack_mic = true;
+		wm8996->detecting = false;
+
+		/* Increase poll rate to give better responsiveness
+		 * for buttons */
+		snd_soc_update_bits(codec, WM8996_MIC_DETECT_1,
+				    WM8996_MICD_RATE_MASK,
+				    5 << WM8996_MICD_RATE_SHIFT);
+	}
+
+	/* If we detected a lower impedance during initial startup
+	 * then we probably have the wrong polarity, flip it. Don't
+	 * do this for the lowest impedances to speed up detection of
+	 * plain headphones.
+	 */
+	if (wm8996->detecting && (val & 0x3f0)) {
+		reg = snd_soc_read(codec, WM8996_ACCESSORY_DETECT_MODE_2);
+		reg ^= WM8996_HPOUT1FB_SRC | WM8996_MICD_SRC |
+			WM8996_MICD_BIAS_SRC;
+		snd_soc_update_bits(codec, WM8996_ACCESSORY_DETECT_MODE_2,
+				    WM8996_HPOUT1FB_SRC | WM8996_MICD_SRC |
+				    WM8996_MICD_BIAS_SRC, reg);
+
+		if (wm8996->polarity_cb)
+			wm8996->polarity_cb(codec,
+					    (reg & WM8996_MICD_SRC) != 0);
+
+		dev_dbg(codec->dev, "Set microphone polarity to %d\n",
+			(reg & WM8996_MICD_SRC) != 0);
+
+		return;
+	}
+
+	/* Don't distinguish between buttons, just report any low
+	 * impedance as BTN_0.
+	 */
+	if (val & 0x3fc) {
+		if (wm8996->jack_mic) {
+			dev_dbg(codec->dev, "Mic button detected\n");
+			snd_soc_jack_report(wm8996->jack,
+					    SND_JACK_HEADSET | SND_JACK_BTN_0,
+					    SND_JACK_HEADSET | SND_JACK_BTN_0);
+		} else {
+			dev_dbg(codec->dev, "Headphone detected\n");
+			snd_soc_jack_report(wm8996->jack,
+					    SND_JACK_HEADPHONE,
+					    SND_JACK_HEADSET |
+					    SND_JACK_BTN_0);
+
+			/* Increase the detection rate a bit for
+			 * responsiveness.
+			 */
+			snd_soc_update_bits(codec, WM8996_MIC_DETECT_1,
+					    WM8996_MICD_RATE_MASK,
+					    7 << WM8996_MICD_RATE_SHIFT);
+
+			wm8996->detecting = false;
+		}
+	}
+}
+
+static irqreturn_t wm8996_irq(int irq, void *data)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	int irq_val;
+
+	irq_val = snd_soc_read(codec, WM8996_INTERRUPT_STATUS_2);
+	if (irq_val < 0) {
+		dev_err(codec->dev, "Failed to read IRQ status: %d\n",
+			irq_val);
+		return IRQ_NONE;
+	}
+	irq_val &= ~snd_soc_read(codec, WM8996_INTERRUPT_STATUS_2_MASK);
+
+	if (irq_val & (WM8996_DCS_DONE_01_EINT | WM8996_DCS_DONE_23_EINT)) {
+		dev_dbg(codec->dev, "DC servo IRQ\n");
+		complete(&wm8996->dcs_done);
+	}
+
+	if (irq_val & WM8996_FIFOS_ERR_EINT)
+		dev_err(codec->dev, "Digital core FIFO error\n");
+
+	if (irq_val & WM8996_FLL_LOCK_EINT) {
+		dev_dbg(codec->dev, "FLL locked\n");
+		complete(&wm8996->fll_lock);
+	}
+
+	if (irq_val & WM8996_MICD_EINT)
+		wm8996_micd(codec);
+
+	if (irq_val) {
+		snd_soc_write(codec, WM8996_INTERRUPT_STATUS_2, irq_val);
+
+		return IRQ_HANDLED;
+	} else {
+		return IRQ_NONE;
+	}
+}
+
+static irqreturn_t wm8996_edge_irq(int irq, void *data)
+{
+	irqreturn_t ret = IRQ_NONE;
+	irqreturn_t val;
+
+	do {
+		val = wm8996_irq(irq, data);
+		if (val != IRQ_NONE)
+			ret = val;
+	} while (val != IRQ_NONE);
+
+	return ret;
+}
+
+static void wm8996_retune_mobile_pdata(struct snd_soc_codec *codec)
+{
+	struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
+	struct wm8996_pdata *pdata = &wm8996->pdata;
+
+	struct snd_kcontrol_new controls[] = {
+		SOC_ENUM_EXT("DSP1 EQ Mode",
+			     wm8996->retune_mobile_enum,
+			     wm8996_get_retune_mobile_enum,
+			     wm8996_put_retune_mobile_enum),
+		SOC_ENUM_EXT("DSP2 EQ Mode",
+			     wm8996->retune_mobile_enum,
+			     wm8996_get_retune_mobile_enum,
+			     wm8996_put_retune_mobile_enum),
+	};
+	int ret, i, j;
+	const char **t;
+
+	/* We need an array of texts for the enum API but the number
+	 * of texts is likely to be less than the number of
+	 * configurations due to the sample rate dependency of the
+	 * configurations. */
+	wm8996->num_retune_mobile_texts = 0;
+	wm8996->retune_mobile_texts = NULL;
+	for (i = 0; i < pdata->num_retune_mobile_cfgs; i++) {
+		for (j = 0; j < wm8996->num_retune_mobile_texts; j++) {
+			if (strcmp(pdata->retune_mobile_cfgs[i].name,
+				   wm8996->retune_mobile_texts[j]) == 0)
+				break;
+		}
+
+		if (j != wm8996->num_retune_mobile_texts)
+			continue;
+
+		/* Expand the array... 
*/ + t = krealloc(wm8996->retune_mobile_texts, + sizeof(char *) * + (wm8996->num_retune_mobile_texts + 1), + GFP_KERNEL); + if (t == NULL) + continue; + + /* ...store the new entry... */ + t[wm8996->num_retune_mobile_texts] = + pdata->retune_mobile_cfgs[i].name; + + /* ...and remember the new version. */ + wm8996->num_retune_mobile_texts++; + wm8996->retune_mobile_texts = t; + } + + dev_dbg(codec->dev, "Allocated %d unique ReTune Mobile names\n", + wm8996->num_retune_mobile_texts); + + wm8996->retune_mobile_enum.max = wm8996->num_retune_mobile_texts; + wm8996->retune_mobile_enum.texts = wm8996->retune_mobile_texts; + + ret = snd_soc_add_controls(codec, controls, ARRAY_SIZE(controls)); + if (ret != 0) + dev_err(codec->dev, + "Failed to add ReTune Mobile controls: %d\n", ret); +} + +static int wm8996_probe(struct snd_soc_codec *codec) +{ + int ret; + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + struct i2c_client *i2c = to_i2c_client(codec->dev); + struct snd_soc_dapm_context *dapm = &codec->dapm; + int i, irq_flags; + + wm8996->codec = codec; + + init_completion(&wm8996->dcs_done); + init_completion(&wm8996->fll_lock); + + dapm->idle_bias_off = true; + dapm->bias_level = SND_SOC_BIAS_OFF; + + ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C); + if (ret != 0) { + dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); + goto err; + } + + for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) + wm8996->supplies[i].supply = wm8996_supply_names[i]; + + ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8996->supplies), + wm8996->supplies); + if (ret != 0) { + dev_err(codec->dev, "Failed to request supplies: %d\n", ret); + goto err; + } + + wm8996->disable_nb[0].notifier_call = wm8996_regulator_event_0; + wm8996->disable_nb[1].notifier_call = wm8996_regulator_event_1; + wm8996->disable_nb[2].notifier_call = wm8996_regulator_event_2; + wm8996->disable_nb[3].notifier_call = wm8996_regulator_event_3; + + /* This should really be moved into the regulator core */ + for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) { + ret = regulator_register_notifier(wm8996->supplies[i].consumer, + &wm8996->disable_nb[i]); + if (ret != 0) { + dev_err(codec->dev, + "Failed to register regulator notifier: %d\n", + ret); + } + } + + ret = regulator_bulk_enable(ARRAY_SIZE(wm8996->supplies), + wm8996->supplies); + if (ret != 0) { + dev_err(codec->dev, "Failed to enable supplies: %d\n", ret); + goto err_get; + } + + if (wm8996->pdata.ldo_ena >= 0) { + gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 1); + msleep(5); + } + + ret = snd_soc_read(codec, WM8996_SOFTWARE_RESET); + if (ret < 0) { + dev_err(codec->dev, "Failed to read ID register: %d\n", ret); + goto err_enable; + } + if (ret != 0x8915) { + dev_err(codec->dev, "Device is not a WM8996, ID %x\n", ret); + ret = -EINVAL; + goto err_enable; + } + + ret = snd_soc_read(codec, WM8996_CHIP_REVISION); + if (ret < 0) { + dev_err(codec->dev, "Failed to read device revision: %d\n", + ret); + goto err_enable; + } + + dev_info(codec->dev, "revision %c\n", + (ret & WM8996_CHIP_REV_MASK) + 'A'); + + if (wm8996->pdata.ldo_ena >= 0) { + gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0); + } else { + ret = wm8996_reset(codec); + if (ret < 0) { + dev_err(codec->dev, "Failed to issue reset\n"); + goto err_enable; + } + } + + codec->cache_only = true; + + /* Apply platform data settings */ + snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL, + WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK, + wm8996->pdata.inl_mode << WM8996_INL_MODE_SHIFT | + 
wm8996->pdata.inr_mode);
+
+	for (i = 0; i < ARRAY_SIZE(wm8996->pdata.gpio_default); i++) {
+		if (!wm8996->pdata.gpio_default[i])
+			continue;
+
+		snd_soc_write(codec, WM8996_GPIO_1 + i,
+			      wm8996->pdata.gpio_default[i] & 0xffff);
+	}
+
+	if (wm8996->pdata.spkmute_seq)
+		snd_soc_update_bits(codec, WM8996_PDM_SPEAKER_MUTE_SEQUENCE,
+				    WM8996_SPK_MUTE_ENDIAN |
+				    WM8996_SPK_MUTE_SEQ1_MASK,
+				    wm8996->pdata.spkmute_seq);
+
+	snd_soc_update_bits(codec, WM8996_ACCESSORY_DETECT_MODE_2,
+			    WM8996_MICD_BIAS_SRC | WM8996_HPOUT1FB_SRC |
+			    WM8996_MICD_SRC, wm8996->pdata.micdet_def);
+
+	/* Latch volume update bits */
+	snd_soc_update_bits(codec, WM8996_LEFT_LINE_INPUT_VOLUME,
+			    WM8996_IN1_VU, WM8996_IN1_VU);
+	snd_soc_update_bits(codec, WM8996_RIGHT_LINE_INPUT_VOLUME,
+			    WM8996_IN1_VU, WM8996_IN1_VU);
+
+	snd_soc_update_bits(codec, WM8996_DAC1_LEFT_VOLUME,
+			    WM8996_DAC1_VU, WM8996_DAC1_VU);
+	snd_soc_update_bits(codec, WM8996_DAC1_RIGHT_VOLUME,
+			    WM8996_DAC1_VU, WM8996_DAC1_VU);
+	snd_soc_update_bits(codec, WM8996_DAC2_LEFT_VOLUME,
+			    WM8996_DAC2_VU, WM8996_DAC2_VU);
+	snd_soc_update_bits(codec, WM8996_DAC2_RIGHT_VOLUME,
+			    WM8996_DAC2_VU, WM8996_DAC2_VU);
+
+	snd_soc_update_bits(codec, WM8996_OUTPUT1_LEFT_VOLUME,
+			    WM8996_DAC1_VU, WM8996_DAC1_VU);
+	snd_soc_update_bits(codec, WM8996_OUTPUT1_RIGHT_VOLUME,
+			    WM8996_DAC1_VU, WM8996_DAC1_VU);
+	snd_soc_update_bits(codec, WM8996_OUTPUT2_LEFT_VOLUME,
+			    WM8996_DAC2_VU, WM8996_DAC2_VU);
+	snd_soc_update_bits(codec, WM8996_OUTPUT2_RIGHT_VOLUME,
+			    WM8996_DAC2_VU, WM8996_DAC2_VU);
+
+	snd_soc_update_bits(codec, WM8996_DSP1_TX_LEFT_VOLUME,
+			    WM8996_DSP1TX_VU, WM8996_DSP1TX_VU);
+	snd_soc_update_bits(codec, WM8996_DSP1_TX_RIGHT_VOLUME,
+			    WM8996_DSP1TX_VU, WM8996_DSP1TX_VU);
+	snd_soc_update_bits(codec, WM8996_DSP2_TX_LEFT_VOLUME,
+			    WM8996_DSP2TX_VU, WM8996_DSP2TX_VU);
+	snd_soc_update_bits(codec, WM8996_DSP2_TX_RIGHT_VOLUME,
+			    WM8996_DSP2TX_VU, WM8996_DSP2TX_VU);
+
+	snd_soc_update_bits(codec, WM8996_DSP1_RX_LEFT_VOLUME,
+			    WM8996_DSP1RX_VU, WM8996_DSP1RX_VU);
+	snd_soc_update_bits(codec, WM8996_DSP1_RX_RIGHT_VOLUME,
+			    WM8996_DSP1RX_VU, WM8996_DSP1RX_VU);
+	snd_soc_update_bits(codec, WM8996_DSP2_RX_LEFT_VOLUME,
+			    WM8996_DSP2RX_VU, WM8996_DSP2RX_VU);
+	snd_soc_update_bits(codec, WM8996_DSP2_RX_RIGHT_VOLUME,
+			    WM8996_DSP2RX_VU, WM8996_DSP2RX_VU);
+
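/*
 * The channel configuration writes that follow pack two fields into each
 * register: a slot count in the CHANn_SLOTS field and a starting slot in
 * the low bits.  A hypothetical helper showing the packing (the start
 * slot sitting at bit 0 matches the "| 0" / "| 1" terms in those writes);
 * illustrative only, not part of the patch.
 */
static u16 chan_cfg(u16 nslots, u16 start_slot, unsigned int slots_shift)
{
	/* e.g. chan_cfg(1, 1, WM8996_AIF1RX_CHAN1_SLOTS_SHIFT) yields the
	 * value written for AIF1RX channel 1: one slot, starting at 1. */
	return (nslots << slots_shift) | start_slot;
}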
+	/* There is currently no support for the underclocked TDM
+	 * modes, so pick a default TDM layout with each channel pair
+	 * working with slots 0 and 1.
+	 */
+	snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_0_CONFIGURATION,
+			    WM8996_AIF1RX_CHAN0_SLOTS_MASK |
+			    WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1RX_CHAN0_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_1_CONFIGURATION,
+			    WM8996_AIF1RX_CHAN1_SLOTS_MASK |
+			    WM8996_AIF1RX_CHAN1_START_SLOT_MASK,
+			    1 << WM8996_AIF1RX_CHAN1_SLOTS_SHIFT | 1);
+	snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_2_CONFIGURATION,
+			    WM8996_AIF1RX_CHAN2_SLOTS_MASK |
+			    WM8996_AIF1RX_CHAN2_START_SLOT_MASK,
+			    1 << WM8996_AIF1RX_CHAN2_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_3_CONFIGURATION,
+			    WM8996_AIF1RX_CHAN3_SLOTS_MASK |
+			    WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1RX_CHAN3_SLOTS_SHIFT | 1);
+	snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_4_CONFIGURATION,
+			    WM8996_AIF1RX_CHAN4_SLOTS_MASK |
+			    WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1RX_CHAN4_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_5_CONFIGURATION,
+			    WM8996_AIF1RX_CHAN5_SLOTS_MASK |
+			    WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1RX_CHAN5_SLOTS_SHIFT | 1);
+
+	snd_soc_update_bits(codec, WM8996_AIF2RX_CHANNEL_0_CONFIGURATION,
+			    WM8996_AIF2RX_CHAN0_SLOTS_MASK |
+			    WM8996_AIF2RX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF2RX_CHAN0_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF2RX_CHANNEL_1_CONFIGURATION,
+			    WM8996_AIF2RX_CHAN1_SLOTS_MASK |
+			    WM8996_AIF2RX_CHAN1_START_SLOT_MASK,
+			    1 << WM8996_AIF2RX_CHAN1_SLOTS_SHIFT | 1);
+
+	snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_0_CONFIGURATION,
+			    WM8996_AIF1TX_CHAN0_SLOTS_MASK |
+			    WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1TX_CHAN0_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_1_CONFIGURATION,
+			    WM8996_AIF1TX_CHAN1_SLOTS_MASK |
+			    WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1TX_CHAN1_SLOTS_SHIFT | 1);
+	snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_2_CONFIGURATION,
+			    WM8996_AIF1TX_CHAN2_SLOTS_MASK |
+			    WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1TX_CHAN2_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_3_CONFIGURATION,
+			    WM8996_AIF1TX_CHAN3_SLOTS_MASK |
+			    WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1TX_CHAN3_SLOTS_SHIFT | 1);
+	snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_4_CONFIGURATION,
+			    WM8996_AIF1TX_CHAN4_SLOTS_MASK |
+			    WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1TX_CHAN4_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_5_CONFIGURATION,
+			    WM8996_AIF1TX_CHAN5_SLOTS_MASK |
+			    WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF1TX_CHAN5_SLOTS_SHIFT | 1);
+
+	snd_soc_update_bits(codec, WM8996_AIF2TX_CHANNEL_0_CONFIGURATION,
+			    WM8996_AIF2TX_CHAN0_SLOTS_MASK |
+			    WM8996_AIF2TX_CHAN0_START_SLOT_MASK,
+			    1 << WM8996_AIF2TX_CHAN0_SLOTS_SHIFT | 0);
+	snd_soc_update_bits(codec, WM8996_AIF2TX_CHANNEL_1_CONFIGURATION,
+			    WM8996_AIF2TX_CHAN1_SLOTS_MASK |
+			    WM8996_AIF2TX_CHAN1_START_SLOT_MASK,
+			    1 << WM8996_AIF2TX_CHAN1_SLOTS_SHIFT | 1);
+
+	if (wm8996->pdata.num_retune_mobile_cfgs)
+		wm8996_retune_mobile_pdata(codec);
+	else
+		snd_soc_add_controls(codec, wm8996_eq_controls,
+				     ARRAY_SIZE(wm8996_eq_controls));
+
+	/* If the TX LRCLK pins are not in LRCLK mode configure the
+	 * AIFs to source their clocks from the RX LRCLKs.
+ */ + if ((snd_soc_read(codec, WM8996_GPIO_1))) + snd_soc_update_bits(codec, WM8996_AIF1_TX_LRCLK_2, + WM8996_AIF1TX_LRCLK_MODE, + WM8996_AIF1TX_LRCLK_MODE); + + if ((snd_soc_read(codec, WM8996_GPIO_2))) + snd_soc_update_bits(codec, WM8996_AIF2_TX_LRCLK_2, + WM8996_AIF2TX_LRCLK_MODE, + WM8996_AIF2TX_LRCLK_MODE); + + regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); + + wm8996_init_gpio(codec); + + if (i2c->irq) { + if (wm8996->pdata.irq_flags) + irq_flags = wm8996->pdata.irq_flags; + else + irq_flags = IRQF_TRIGGER_LOW; + + irq_flags |= IRQF_ONESHOT; + + if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) + ret = request_threaded_irq(i2c->irq, NULL, + wm8996_edge_irq, + irq_flags, "wm8996", codec); + else + ret = request_threaded_irq(i2c->irq, NULL, wm8996_irq, + irq_flags, "wm8996", codec); + + if (ret == 0) { + /* Unmask the interrupt */ + snd_soc_update_bits(codec, WM8996_INTERRUPT_CONTROL, + WM8996_IM_IRQ, 0); + + /* Enable error reporting and DC servo status */ + snd_soc_update_bits(codec, + WM8996_INTERRUPT_STATUS_2_MASK, + WM8996_IM_DCS_DONE_23_EINT | + WM8996_IM_DCS_DONE_01_EINT | + WM8996_IM_FLL_LOCK_EINT | + WM8996_IM_FIFOS_ERR_EINT, + 0); + } else { + dev_err(codec->dev, "Failed to request IRQ: %d\n", + ret); + } + } + + return 0; + +err_enable: + if (wm8996->pdata.ldo_ena >= 0) + gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0); + + regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); +err_get: + regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); +err: + return ret; +} + +static int wm8996_remove(struct snd_soc_codec *codec) +{ + struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec); + struct i2c_client *i2c = to_i2c_client(codec->dev); + int i; + + snd_soc_update_bits(codec, WM8996_INTERRUPT_CONTROL, + WM8996_IM_IRQ, WM8996_IM_IRQ); + + if (i2c->irq) + free_irq(i2c->irq, codec); + + wm8996_free_gpio(codec); + + for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) + regulator_unregister_notifier(wm8996->supplies[i].consumer, + &wm8996->disable_nb[i]); + regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies); + + return 0; +} + +static struct snd_soc_codec_driver soc_codec_dev_wm8996 = { + .probe = wm8996_probe, + .remove = wm8996_remove, + .set_bias_level = wm8996_set_bias_level, + .seq_notifier = wm8996_seq_notifier, + .reg_cache_size = WM8996_MAX_REGISTER + 1, + .reg_word_size = sizeof(u16), + .reg_cache_default = wm8996_reg, + .volatile_register = wm8996_volatile_register, + .readable_register = wm8996_readable_register, + .compress_type = SND_SOC_RBTREE_COMPRESSION, + .controls = wm8996_snd_controls, + .num_controls = ARRAY_SIZE(wm8996_snd_controls), + .dapm_widgets = wm8996_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(wm8996_dapm_widgets), + .dapm_routes = wm8996_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(wm8996_dapm_routes), + .set_pll = wm8996_set_fll, +}; + +#define WM8996_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ + SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000) +#define WM8996_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\ + SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE |\ + SNDRV_PCM_FMTBIT_S32_LE) + +static struct snd_soc_dai_ops wm8996_dai_ops = { + .set_fmt = wm8996_set_fmt, + .hw_params = wm8996_hw_params, + .set_sysclk = wm8996_set_sysclk, +}; + +static struct snd_soc_dai_driver wm8996_dai[] = { + { + .name = "wm8996-aif1", + .playback = { + .stream_name = "AIF1 Playback", + .channels_min = 1, + .channels_max = 6, + .rates = WM8996_RATES, + 
.formats = WM8996_FORMATS, + }, + .capture = { + .stream_name = "AIF1 Capture", + .channels_min = 1, + .channels_max = 6, + .rates = WM8996_RATES, + .formats = WM8996_FORMATS, + }, + .ops = &wm8996_dai_ops, + }, + { + .name = "wm8996-aif2", + .playback = { + .stream_name = "AIF2 Playback", + .channels_min = 1, + .channels_max = 2, + .rates = WM8996_RATES, + .formats = WM8996_FORMATS, + }, + .capture = { + .stream_name = "AIF2 Capture", + .channels_min = 1, + .channels_max = 2, + .rates = WM8996_RATES, + .formats = WM8996_FORMATS, + }, + .ops = &wm8996_dai_ops, + }, +}; + +static __devinit int wm8996_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct wm8996_priv *wm8996; + int ret; + + wm8996 = kzalloc(sizeof(struct wm8996_priv), GFP_KERNEL); + if (wm8996 == NULL) + return -ENOMEM; + + i2c_set_clientdata(i2c, wm8996); + + if (dev_get_platdata(&i2c->dev)) + memcpy(&wm8996->pdata, dev_get_platdata(&i2c->dev), + sizeof(wm8996->pdata)); + + if (wm8996->pdata.ldo_ena > 0) { + ret = gpio_request_one(wm8996->pdata.ldo_ena, + GPIOF_OUT_INIT_LOW, "WM8996 ENA"); + if (ret < 0) { + dev_err(&i2c->dev, "Failed to request GPIO %d: %d\n", + wm8996->pdata.ldo_ena, ret); + goto err; + } + } + + ret = snd_soc_register_codec(&i2c->dev, + &soc_codec_dev_wm8996, wm8996_dai, + ARRAY_SIZE(wm8996_dai)); + if (ret < 0) + goto err_gpio; + + return ret; + +err_gpio: + if (wm8996->pdata.ldo_ena > 0) + gpio_free(wm8996->pdata.ldo_ena); +err: + kfree(wm8996); + + return ret; +} + +static __devexit int wm8996_i2c_remove(struct i2c_client *client) +{ + struct wm8996_priv *wm8996 = i2c_get_clientdata(client); + + snd_soc_unregister_codec(&client->dev); + if (wm8996->pdata.ldo_ena > 0) + gpio_free(wm8996->pdata.ldo_ena); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static const struct i2c_device_id wm8996_i2c_id[] = { + { "wm8996", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, wm8996_i2c_id); + +static struct i2c_driver wm8996_i2c_driver = { + .driver = { + .name = "wm8996", + .owner = THIS_MODULE, + }, + .probe = wm8996_i2c_probe, + .remove = __devexit_p(wm8996_i2c_remove), + .id_table = wm8996_i2c_id, +}; + +static int __init wm8996_modinit(void) +{ + int ret; + + ret = i2c_add_driver(&wm8996_i2c_driver); + if (ret != 0) { + printk(KERN_ERR "Failed to register WM8996 I2C driver: %d\n", + ret); + } + + return ret; +} +module_init(wm8996_modinit); + +static void __exit wm8996_exit(void) +{ + i2c_del_driver(&wm8996_i2c_driver); +} +module_exit(wm8996_exit); + +MODULE_DESCRIPTION("ASoC WM8996 driver"); +MODULE_AUTHOR("Mark Brown "); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/wm8996.h b/sound/soc/codecs/wm8996.h new file mode 100644 index 0000000..0fde643 --- /dev/null +++ b/sound/soc/codecs/wm8996.h @@ -0,0 +1,3717 @@ +/* + * wm8996.h - WM8996 audio codec interface + * + * Copyright 2011 Wolfson Microelectronics PLC. + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _WM8996_H +#define _WM8996_H + +#define WM8996_SYSCLK_MCLK1 1 +#define WM8996_SYSCLK_MCLK2 2 +#define WM8996_SYSCLK_FLL 3 + +#define WM8996_FLL_MCLK1 1 +#define WM8996_FLL_MCLK2 2 +#define WM8996_FLL_DACLRCLK1 3 +#define WM8996_FLL_BCLK1 4 + +typedef void (*wm8996_polarity_fn)(struct snd_soc_codec *codec, int polarity); + +int wm8996_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, + wm8996_polarity_fn polarity_cb); + +/* + * Register values. + */ +#define WM8996_SOFTWARE_RESET 0x00 +#define WM8996_POWER_MANAGEMENT_1 0x01 +#define WM8996_POWER_MANAGEMENT_2 0x02 +#define WM8996_POWER_MANAGEMENT_3 0x03 +#define WM8996_POWER_MANAGEMENT_4 0x04 +#define WM8996_POWER_MANAGEMENT_5 0x05 +#define WM8996_POWER_MANAGEMENT_6 0x06 +#define WM8996_POWER_MANAGEMENT_7 0x07 +#define WM8996_POWER_MANAGEMENT_8 0x08 +#define WM8996_LEFT_LINE_INPUT_VOLUME 0x10 +#define WM8996_RIGHT_LINE_INPUT_VOLUME 0x11 +#define WM8996_LINE_INPUT_CONTROL 0x12 +#define WM8996_DAC1_HPOUT1_VOLUME 0x15 +#define WM8996_DAC2_HPOUT2_VOLUME 0x16 +#define WM8996_DAC1_LEFT_VOLUME 0x18 +#define WM8996_DAC1_RIGHT_VOLUME 0x19 +#define WM8996_DAC2_LEFT_VOLUME 0x1A +#define WM8996_DAC2_RIGHT_VOLUME 0x1B +#define WM8996_OUTPUT1_LEFT_VOLUME 0x1C +#define WM8996_OUTPUT1_RIGHT_VOLUME 0x1D +#define WM8996_OUTPUT2_LEFT_VOLUME 0x1E +#define WM8996_OUTPUT2_RIGHT_VOLUME 0x1F +#define WM8996_MICBIAS_1 0x20 +#define WM8996_MICBIAS_2 0x21 +#define WM8996_LDO_1 0x28 +#define WM8996_LDO_2 0x29 +#define WM8996_ACCESSORY_DETECT_MODE_1 0x30 +#define WM8996_ACCESSORY_DETECT_MODE_2 0x31 +#define WM8996_HEADPHONE_DETECT_1 0x34 +#define WM8996_HEADPHONE_DETECT_2 0x35 +#define WM8996_MIC_DETECT_1 0x38 +#define WM8996_MIC_DETECT_2 0x39 +#define WM8996_MIC_DETECT_3 0x3A +#define WM8996_CHARGE_PUMP_1 0x40 +#define WM8996_CHARGE_PUMP_2 0x41 +#define WM8996_DC_SERVO_1 0x50 +#define WM8996_DC_SERVO_2 0x51 +#define WM8996_DC_SERVO_3 0x52 +#define WM8996_DC_SERVO_5 0x54 +#define WM8996_DC_SERVO_6 0x55 +#define WM8996_DC_SERVO_7 0x56 +#define WM8996_DC_SERVO_READBACK_0 0x57 +#define WM8996_ANALOGUE_HP_1 0x60 +#define WM8996_ANALOGUE_HP_2 0x61 +#define WM8996_CHIP_REVISION 0x100 +#define WM8996_CONTROL_INTERFACE_1 0x101 +#define WM8996_WRITE_SEQUENCER_CTRL_1 0x110 +#define WM8996_WRITE_SEQUENCER_CTRL_2 0x111 +#define WM8996_AIF_CLOCKING_1 0x200 +#define WM8996_AIF_CLOCKING_2 0x201 +#define WM8996_CLOCKING_1 0x208 +#define WM8996_CLOCKING_2 0x209 +#define WM8996_AIF_RATE 0x210 +#define WM8996_FLL_CONTROL_1 0x220 +#define WM8996_FLL_CONTROL_2 0x221 +#define WM8996_FLL_CONTROL_3 0x222 +#define WM8996_FLL_CONTROL_4 0x223 +#define WM8996_FLL_CONTROL_5 0x224 +#define WM8996_FLL_CONTROL_6 0x225 +#define WM8996_FLL_EFS_1 0x226 +#define WM8996_FLL_EFS_2 0x227 +#define WM8996_AIF1_CONTROL 0x300 +#define WM8996_AIF1_BCLK 0x301 +#define WM8996_AIF1_TX_LRCLK_1 0x302 +#define WM8996_AIF1_TX_LRCLK_2 0x303 +#define WM8996_AIF1_RX_LRCLK_1 0x304 +#define WM8996_AIF1_RX_LRCLK_2 0x305 +#define WM8996_AIF1TX_DATA_CONFIGURATION_1 0x306 +#define WM8996_AIF1TX_DATA_CONFIGURATION_2 0x307 +#define WM8996_AIF1RX_DATA_CONFIGURATION 0x308 +#define WM8996_AIF1TX_CHANNEL_0_CONFIGURATION 0x309 +#define WM8996_AIF1TX_CHANNEL_1_CONFIGURATION 0x30A +#define WM8996_AIF1TX_CHANNEL_2_CONFIGURATION 0x30B +#define WM8996_AIF1TX_CHANNEL_3_CONFIGURATION 0x30C +#define WM8996_AIF1TX_CHANNEL_4_CONFIGURATION 0x30D +#define WM8996_AIF1TX_CHANNEL_5_CONFIGURATION 0x30E +#define WM8996_AIF1RX_CHANNEL_0_CONFIGURATION 0x30F +#define WM8996_AIF1RX_CHANNEL_1_CONFIGURATION 0x310 +#define 
WM8996_AIF1RX_CHANNEL_2_CONFIGURATION 0x311 +#define WM8996_AIF1RX_CHANNEL_3_CONFIGURATION 0x312 +#define WM8996_AIF1RX_CHANNEL_4_CONFIGURATION 0x313 +#define WM8996_AIF1RX_CHANNEL_5_CONFIGURATION 0x314 +#define WM8996_AIF1RX_MONO_CONFIGURATION 0x315 +#define WM8996_AIF1TX_TEST 0x31A +#define WM8996_AIF2_CONTROL 0x320 +#define WM8996_AIF2_BCLK 0x321 +#define WM8996_AIF2_TX_LRCLK_1 0x322 +#define WM8996_AIF2_TX_LRCLK_2 0x323 +#define WM8996_AIF2_RX_LRCLK_1 0x324 +#define WM8996_AIF2_RX_LRCLK_2 0x325 +#define WM8996_AIF2TX_DATA_CONFIGURATION_1 0x326 +#define WM8996_AIF2TX_DATA_CONFIGURATION_2 0x327 +#define WM8996_AIF2RX_DATA_CONFIGURATION 0x328 +#define WM8996_AIF2TX_CHANNEL_0_CONFIGURATION 0x329 +#define WM8996_AIF2TX_CHANNEL_1_CONFIGURATION 0x32A +#define WM8996_AIF2RX_CHANNEL_0_CONFIGURATION 0x32B +#define WM8996_AIF2RX_CHANNEL_1_CONFIGURATION 0x32C +#define WM8996_AIF2RX_MONO_CONFIGURATION 0x32D +#define WM8996_AIF2TX_TEST 0x32F +#define WM8996_DSP1_TX_LEFT_VOLUME 0x400 +#define WM8996_DSP1_TX_RIGHT_VOLUME 0x401 +#define WM8996_DSP1_RX_LEFT_VOLUME 0x402 +#define WM8996_DSP1_RX_RIGHT_VOLUME 0x403 +#define WM8996_DSP1_TX_FILTERS 0x410 +#define WM8996_DSP1_RX_FILTERS_1 0x420 +#define WM8996_DSP1_RX_FILTERS_2 0x421 +#define WM8996_DSP1_DRC_1 0x440 +#define WM8996_DSP1_DRC_2 0x441 +#define WM8996_DSP1_DRC_3 0x442 +#define WM8996_DSP1_DRC_4 0x443 +#define WM8996_DSP1_DRC_5 0x444 +#define WM8996_DSP1_RX_EQ_GAINS_1 0x480 +#define WM8996_DSP1_RX_EQ_GAINS_2 0x481 +#define WM8996_DSP1_RX_EQ_BAND_1_A 0x482 +#define WM8996_DSP1_RX_EQ_BAND_1_B 0x483 +#define WM8996_DSP1_RX_EQ_BAND_1_PG 0x484 +#define WM8996_DSP1_RX_EQ_BAND_2_A 0x485 +#define WM8996_DSP1_RX_EQ_BAND_2_B 0x486 +#define WM8996_DSP1_RX_EQ_BAND_2_C 0x487 +#define WM8996_DSP1_RX_EQ_BAND_2_PG 0x488 +#define WM8996_DSP1_RX_EQ_BAND_3_A 0x489 +#define WM8996_DSP1_RX_EQ_BAND_3_B 0x48A +#define WM8996_DSP1_RX_EQ_BAND_3_C 0x48B +#define WM8996_DSP1_RX_EQ_BAND_3_PG 0x48C +#define WM8996_DSP1_RX_EQ_BAND_4_A 0x48D +#define WM8996_DSP1_RX_EQ_BAND_4_B 0x48E +#define WM8996_DSP1_RX_EQ_BAND_4_C 0x48F +#define WM8996_DSP1_RX_EQ_BAND_4_PG 0x490 +#define WM8996_DSP1_RX_EQ_BAND_5_A 0x491 +#define WM8996_DSP1_RX_EQ_BAND_5_B 0x492 +#define WM8996_DSP1_RX_EQ_BAND_5_PG 0x493 +#define WM8996_DSP2_TX_LEFT_VOLUME 0x500 +#define WM8996_DSP2_TX_RIGHT_VOLUME 0x501 +#define WM8996_DSP2_RX_LEFT_VOLUME 0x502 +#define WM8996_DSP2_RX_RIGHT_VOLUME 0x503 +#define WM8996_DSP2_TX_FILTERS 0x510 +#define WM8996_DSP2_RX_FILTERS_1 0x520 +#define WM8996_DSP2_RX_FILTERS_2 0x521 +#define WM8996_DSP2_DRC_1 0x540 +#define WM8996_DSP2_DRC_2 0x541 +#define WM8996_DSP2_DRC_3 0x542 +#define WM8996_DSP2_DRC_4 0x543 +#define WM8996_DSP2_DRC_5 0x544 +#define WM8996_DSP2_RX_EQ_GAINS_1 0x580 +#define WM8996_DSP2_RX_EQ_GAINS_2 0x581 +#define WM8996_DSP2_RX_EQ_BAND_1_A 0x582 +#define WM8996_DSP2_RX_EQ_BAND_1_B 0x583 +#define WM8996_DSP2_RX_EQ_BAND_1_PG 0x584 +#define WM8996_DSP2_RX_EQ_BAND_2_A 0x585 +#define WM8996_DSP2_RX_EQ_BAND_2_B 0x586 +#define WM8996_DSP2_RX_EQ_BAND_2_C 0x587 +#define WM8996_DSP2_RX_EQ_BAND_2_PG 0x588 +#define WM8996_DSP2_RX_EQ_BAND_3_A 0x589 +#define WM8996_DSP2_RX_EQ_BAND_3_B 0x58A +#define WM8996_DSP2_RX_EQ_BAND_3_C 0x58B +#define WM8996_DSP2_RX_EQ_BAND_3_PG 0x58C +#define WM8996_DSP2_RX_EQ_BAND_4_A 0x58D +#define WM8996_DSP2_RX_EQ_BAND_4_B 0x58E +#define WM8996_DSP2_RX_EQ_BAND_4_C 0x58F +#define WM8996_DSP2_RX_EQ_BAND_4_PG 0x590 +#define WM8996_DSP2_RX_EQ_BAND_5_A 0x591 +#define WM8996_DSP2_RX_EQ_BAND_5_B 0x592 +#define WM8996_DSP2_RX_EQ_BAND_5_PG 0x593 +#define 
WM8996_DAC1_MIXER_VOLUMES 0x600 +#define WM8996_DAC1_LEFT_MIXER_ROUTING 0x601 +#define WM8996_DAC1_RIGHT_MIXER_ROUTING 0x602 +#define WM8996_DAC2_MIXER_VOLUMES 0x603 +#define WM8996_DAC2_LEFT_MIXER_ROUTING 0x604 +#define WM8996_DAC2_RIGHT_MIXER_ROUTING 0x605 +#define WM8996_DSP1_TX_LEFT_MIXER_ROUTING 0x606 +#define WM8996_DSP1_TX_RIGHT_MIXER_ROUTING 0x607 +#define WM8996_DSP2_TX_LEFT_MIXER_ROUTING 0x608 +#define WM8996_DSP2_TX_RIGHT_MIXER_ROUTING 0x609 +#define WM8996_DSP_TX_MIXER_SELECT 0x60A +#define WM8996_DAC_SOFTMUTE 0x610 +#define WM8996_OVERSAMPLING 0x620 +#define WM8996_SIDETONE 0x621 +#define WM8996_GPIO_1 0x700 +#define WM8996_GPIO_2 0x701 +#define WM8996_GPIO_3 0x702 +#define WM8996_GPIO_4 0x703 +#define WM8996_GPIO_5 0x704 +#define WM8996_PULL_CONTROL_1 0x720 +#define WM8996_PULL_CONTROL_2 0x721 +#define WM8996_INTERRUPT_STATUS_1 0x730 +#define WM8996_INTERRUPT_STATUS_2 0x731 +#define WM8996_INTERRUPT_RAW_STATUS_2 0x732 +#define WM8996_INTERRUPT_STATUS_1_MASK 0x738 +#define WM8996_INTERRUPT_STATUS_2_MASK 0x739 +#define WM8996_INTERRUPT_CONTROL 0x740 +#define WM8996_LEFT_PDM_SPEAKER 0x800 +#define WM8996_RIGHT_PDM_SPEAKER 0x801 +#define WM8996_PDM_SPEAKER_MUTE_SEQUENCE 0x802 +#define WM8996_PDM_SPEAKER_VOLUME 0x803 +#define WM8996_WRITE_SEQUENCER_0 0x3000 +#define WM8996_WRITE_SEQUENCER_1 0x3001 +#define WM8996_WRITE_SEQUENCER_2 0x3002 +#define WM8996_WRITE_SEQUENCER_3 0x3003 +#define WM8996_WRITE_SEQUENCER_4 0x3004 +#define WM8996_WRITE_SEQUENCER_5 0x3005 +#define WM8996_WRITE_SEQUENCER_6 0x3006 +#define WM8996_WRITE_SEQUENCER_7 0x3007 +#define WM8996_WRITE_SEQUENCER_8 0x3008 +#define WM8996_WRITE_SEQUENCER_9 0x3009 +#define WM8996_WRITE_SEQUENCER_10 0x300A +#define WM8996_WRITE_SEQUENCER_11 0x300B +#define WM8996_WRITE_SEQUENCER_12 0x300C +#define WM8996_WRITE_SEQUENCER_13 0x300D +#define WM8996_WRITE_SEQUENCER_14 0x300E +#define WM8996_WRITE_SEQUENCER_15 0x300F +#define WM8996_WRITE_SEQUENCER_16 0x3010 +#define WM8996_WRITE_SEQUENCER_17 0x3011 +#define WM8996_WRITE_SEQUENCER_18 0x3012 +#define WM8996_WRITE_SEQUENCER_19 0x3013 +#define WM8996_WRITE_SEQUENCER_20 0x3014 +#define WM8996_WRITE_SEQUENCER_21 0x3015 +#define WM8996_WRITE_SEQUENCER_22 0x3016 +#define WM8996_WRITE_SEQUENCER_23 0x3017 +#define WM8996_WRITE_SEQUENCER_24 0x3018 +#define WM8996_WRITE_SEQUENCER_25 0x3019 +#define WM8996_WRITE_SEQUENCER_26 0x301A +#define WM8996_WRITE_SEQUENCER_27 0x301B +#define WM8996_WRITE_SEQUENCER_28 0x301C +#define WM8996_WRITE_SEQUENCER_29 0x301D +#define WM8996_WRITE_SEQUENCER_30 0x301E +#define WM8996_WRITE_SEQUENCER_31 0x301F +#define WM8996_WRITE_SEQUENCER_32 0x3020 +#define WM8996_WRITE_SEQUENCER_33 0x3021 +#define WM8996_WRITE_SEQUENCER_34 0x3022 +#define WM8996_WRITE_SEQUENCER_35 0x3023 +#define WM8996_WRITE_SEQUENCER_36 0x3024 +#define WM8996_WRITE_SEQUENCER_37 0x3025 +#define WM8996_WRITE_SEQUENCER_38 0x3026 +#define WM8996_WRITE_SEQUENCER_39 0x3027 +#define WM8996_WRITE_SEQUENCER_40 0x3028 +#define WM8996_WRITE_SEQUENCER_41 0x3029 +#define WM8996_WRITE_SEQUENCER_42 0x302A +#define WM8996_WRITE_SEQUENCER_43 0x302B +#define WM8996_WRITE_SEQUENCER_44 0x302C +#define WM8996_WRITE_SEQUENCER_45 0x302D +#define WM8996_WRITE_SEQUENCER_46 0x302E +#define WM8996_WRITE_SEQUENCER_47 0x302F +#define WM8996_WRITE_SEQUENCER_48 0x3030 +#define WM8996_WRITE_SEQUENCER_49 0x3031 +#define WM8996_WRITE_SEQUENCER_50 0x3032 +#define WM8996_WRITE_SEQUENCER_51 0x3033 +#define WM8996_WRITE_SEQUENCER_52 0x3034 +#define WM8996_WRITE_SEQUENCER_53 0x3035 +#define WM8996_WRITE_SEQUENCER_54 0x3036 +#define 
WM8996_WRITE_SEQUENCER_55 0x3037 +#define WM8996_WRITE_SEQUENCER_56 0x3038 +#define WM8996_WRITE_SEQUENCER_57 0x3039 +#define WM8996_WRITE_SEQUENCER_58 0x303A +#define WM8996_WRITE_SEQUENCER_59 0x303B +#define WM8996_WRITE_SEQUENCER_60 0x303C +#define WM8996_WRITE_SEQUENCER_61 0x303D +#define WM8996_WRITE_SEQUENCER_62 0x303E +#define WM8996_WRITE_SEQUENCER_63 0x303F +#define WM8996_WRITE_SEQUENCER_64 0x3040 +#define WM8996_WRITE_SEQUENCER_65 0x3041 +#define WM8996_WRITE_SEQUENCER_66 0x3042 +#define WM8996_WRITE_SEQUENCER_67 0x3043 +#define WM8996_WRITE_SEQUENCER_68 0x3044 +#define WM8996_WRITE_SEQUENCER_69 0x3045 +#define WM8996_WRITE_SEQUENCER_70 0x3046 +#define WM8996_WRITE_SEQUENCER_71 0x3047 +#define WM8996_WRITE_SEQUENCER_72 0x3048 +#define WM8996_WRITE_SEQUENCER_73 0x3049 +#define WM8996_WRITE_SEQUENCER_74 0x304A +#define WM8996_WRITE_SEQUENCER_75 0x304B +#define WM8996_WRITE_SEQUENCER_76 0x304C +#define WM8996_WRITE_SEQUENCER_77 0x304D +#define WM8996_WRITE_SEQUENCER_78 0x304E +#define WM8996_WRITE_SEQUENCER_79 0x304F +#define WM8996_WRITE_SEQUENCER_80 0x3050 +#define WM8996_WRITE_SEQUENCER_81 0x3051 +#define WM8996_WRITE_SEQUENCER_82 0x3052 +#define WM8996_WRITE_SEQUENCER_83 0x3053 +#define WM8996_WRITE_SEQUENCER_84 0x3054 +#define WM8996_WRITE_SEQUENCER_85 0x3055 +#define WM8996_WRITE_SEQUENCER_86 0x3056 +#define WM8996_WRITE_SEQUENCER_87 0x3057 +#define WM8996_WRITE_SEQUENCER_88 0x3058 +#define WM8996_WRITE_SEQUENCER_89 0x3059 +#define WM8996_WRITE_SEQUENCER_90 0x305A +#define WM8996_WRITE_SEQUENCER_91 0x305B +#define WM8996_WRITE_SEQUENCER_92 0x305C +#define WM8996_WRITE_SEQUENCER_93 0x305D +#define WM8996_WRITE_SEQUENCER_94 0x305E +#define WM8996_WRITE_SEQUENCER_95 0x305F +#define WM8996_WRITE_SEQUENCER_96 0x3060 +#define WM8996_WRITE_SEQUENCER_97 0x3061 +#define WM8996_WRITE_SEQUENCER_98 0x3062 +#define WM8996_WRITE_SEQUENCER_99 0x3063 +#define WM8996_WRITE_SEQUENCER_100 0x3064 +#define WM8996_WRITE_SEQUENCER_101 0x3065 +#define WM8996_WRITE_SEQUENCER_102 0x3066 +#define WM8996_WRITE_SEQUENCER_103 0x3067 +#define WM8996_WRITE_SEQUENCER_104 0x3068 +#define WM8996_WRITE_SEQUENCER_105 0x3069 +#define WM8996_WRITE_SEQUENCER_106 0x306A +#define WM8996_WRITE_SEQUENCER_107 0x306B +#define WM8996_WRITE_SEQUENCER_108 0x306C +#define WM8996_WRITE_SEQUENCER_109 0x306D +#define WM8996_WRITE_SEQUENCER_110 0x306E +#define WM8996_WRITE_SEQUENCER_111 0x306F +#define WM8996_WRITE_SEQUENCER_112 0x3070 +#define WM8996_WRITE_SEQUENCER_113 0x3071 +#define WM8996_WRITE_SEQUENCER_114 0x3072 +#define WM8996_WRITE_SEQUENCER_115 0x3073 +#define WM8996_WRITE_SEQUENCER_116 0x3074 +#define WM8996_WRITE_SEQUENCER_117 0x3075 +#define WM8996_WRITE_SEQUENCER_118 0x3076 +#define WM8996_WRITE_SEQUENCER_119 0x3077 +#define WM8996_WRITE_SEQUENCER_120 0x3078 +#define WM8996_WRITE_SEQUENCER_121 0x3079 +#define WM8996_WRITE_SEQUENCER_122 0x307A +#define WM8996_WRITE_SEQUENCER_123 0x307B +#define WM8996_WRITE_SEQUENCER_124 0x307C +#define WM8996_WRITE_SEQUENCER_125 0x307D +#define WM8996_WRITE_SEQUENCER_126 0x307E +#define WM8996_WRITE_SEQUENCER_127 0x307F +#define WM8996_WRITE_SEQUENCER_128 0x3080 +#define WM8996_WRITE_SEQUENCER_129 0x3081 +#define WM8996_WRITE_SEQUENCER_130 0x3082 +#define WM8996_WRITE_SEQUENCER_131 0x3083 +#define WM8996_WRITE_SEQUENCER_132 0x3084 +#define WM8996_WRITE_SEQUENCER_133 0x3085 +#define WM8996_WRITE_SEQUENCER_134 0x3086 +#define WM8996_WRITE_SEQUENCER_135 0x3087 +#define WM8996_WRITE_SEQUENCER_136 0x3088 +#define WM8996_WRITE_SEQUENCER_137 0x3089 +#define WM8996_WRITE_SEQUENCER_138 
0x308A +#define WM8996_WRITE_SEQUENCER_139 0x308B +#define WM8996_WRITE_SEQUENCER_140 0x308C +#define WM8996_WRITE_SEQUENCER_141 0x308D +#define WM8996_WRITE_SEQUENCER_142 0x308E +#define WM8996_WRITE_SEQUENCER_143 0x308F +#define WM8996_WRITE_SEQUENCER_144 0x3090 +#define WM8996_WRITE_SEQUENCER_145 0x3091 +#define WM8996_WRITE_SEQUENCER_146 0x3092 +#define WM8996_WRITE_SEQUENCER_147 0x3093 +#define WM8996_WRITE_SEQUENCER_148 0x3094 +#define WM8996_WRITE_SEQUENCER_149 0x3095 +#define WM8996_WRITE_SEQUENCER_150 0x3096 +#define WM8996_WRITE_SEQUENCER_151 0x3097 +#define WM8996_WRITE_SEQUENCER_152 0x3098 +#define WM8996_WRITE_SEQUENCER_153 0x3099 +#define WM8996_WRITE_SEQUENCER_154 0x309A +#define WM8996_WRITE_SEQUENCER_155 0x309B +#define WM8996_WRITE_SEQUENCER_156 0x309C +#define WM8996_WRITE_SEQUENCER_157 0x309D +#define WM8996_WRITE_SEQUENCER_158 0x309E +#define WM8996_WRITE_SEQUENCER_159 0x309F +#define WM8996_WRITE_SEQUENCER_160 0x30A0 +#define WM8996_WRITE_SEQUENCER_161 0x30A1 +#define WM8996_WRITE_SEQUENCER_162 0x30A2 +#define WM8996_WRITE_SEQUENCER_163 0x30A3 +#define WM8996_WRITE_SEQUENCER_164 0x30A4 +#define WM8996_WRITE_SEQUENCER_165 0x30A5 +#define WM8996_WRITE_SEQUENCER_166 0x30A6 +#define WM8996_WRITE_SEQUENCER_167 0x30A7 +#define WM8996_WRITE_SEQUENCER_168 0x30A8 +#define WM8996_WRITE_SEQUENCER_169 0x30A9 +#define WM8996_WRITE_SEQUENCER_170 0x30AA +#define WM8996_WRITE_SEQUENCER_171 0x30AB +#define WM8996_WRITE_SEQUENCER_172 0x30AC +#define WM8996_WRITE_SEQUENCER_173 0x30AD +#define WM8996_WRITE_SEQUENCER_174 0x30AE +#define WM8996_WRITE_SEQUENCER_175 0x30AF +#define WM8996_WRITE_SEQUENCER_176 0x30B0 +#define WM8996_WRITE_SEQUENCER_177 0x30B1 +#define WM8996_WRITE_SEQUENCER_178 0x30B2 +#define WM8996_WRITE_SEQUENCER_179 0x30B3 +#define WM8996_WRITE_SEQUENCER_180 0x30B4 +#define WM8996_WRITE_SEQUENCER_181 0x30B5 +#define WM8996_WRITE_SEQUENCER_182 0x30B6 +#define WM8996_WRITE_SEQUENCER_183 0x30B7 +#define WM8996_WRITE_SEQUENCER_184 0x30B8 +#define WM8996_WRITE_SEQUENCER_185 0x30B9 +#define WM8996_WRITE_SEQUENCER_186 0x30BA +#define WM8996_WRITE_SEQUENCER_187 0x30BB +#define WM8996_WRITE_SEQUENCER_188 0x30BC +#define WM8996_WRITE_SEQUENCER_189 0x30BD +#define WM8996_WRITE_SEQUENCER_190 0x30BE +#define WM8996_WRITE_SEQUENCER_191 0x30BF +#define WM8996_WRITE_SEQUENCER_192 0x30C0 +#define WM8996_WRITE_SEQUENCER_193 0x30C1 +#define WM8996_WRITE_SEQUENCER_194 0x30C2 +#define WM8996_WRITE_SEQUENCER_195 0x30C3 +#define WM8996_WRITE_SEQUENCER_196 0x30C4 +#define WM8996_WRITE_SEQUENCER_197 0x30C5 +#define WM8996_WRITE_SEQUENCER_198 0x30C6 +#define WM8996_WRITE_SEQUENCER_199 0x30C7 +#define WM8996_WRITE_SEQUENCER_200 0x30C8 +#define WM8996_WRITE_SEQUENCER_201 0x30C9 +#define WM8996_WRITE_SEQUENCER_202 0x30CA +#define WM8996_WRITE_SEQUENCER_203 0x30CB +#define WM8996_WRITE_SEQUENCER_204 0x30CC +#define WM8996_WRITE_SEQUENCER_205 0x30CD +#define WM8996_WRITE_SEQUENCER_206 0x30CE +#define WM8996_WRITE_SEQUENCER_207 0x30CF +#define WM8996_WRITE_SEQUENCER_208 0x30D0 +#define WM8996_WRITE_SEQUENCER_209 0x30D1 +#define WM8996_WRITE_SEQUENCER_210 0x30D2 +#define WM8996_WRITE_SEQUENCER_211 0x30D3 +#define WM8996_WRITE_SEQUENCER_212 0x30D4 +#define WM8996_WRITE_SEQUENCER_213 0x30D5 +#define WM8996_WRITE_SEQUENCER_214 0x30D6 +#define WM8996_WRITE_SEQUENCER_215 0x30D7 +#define WM8996_WRITE_SEQUENCER_216 0x30D8 +#define WM8996_WRITE_SEQUENCER_217 0x30D9 +#define WM8996_WRITE_SEQUENCER_218 0x30DA +#define WM8996_WRITE_SEQUENCER_219 0x30DB +#define WM8996_WRITE_SEQUENCER_220 0x30DC +#define 
WM8996_WRITE_SEQUENCER_221 0x30DD +#define WM8996_WRITE_SEQUENCER_222 0x30DE +#define WM8996_WRITE_SEQUENCER_223 0x30DF +#define WM8996_WRITE_SEQUENCER_224 0x30E0 +#define WM8996_WRITE_SEQUENCER_225 0x30E1 +#define WM8996_WRITE_SEQUENCER_226 0x30E2 +#define WM8996_WRITE_SEQUENCER_227 0x30E3 +#define WM8996_WRITE_SEQUENCER_228 0x30E4 +#define WM8996_WRITE_SEQUENCER_229 0x30E5 +#define WM8996_WRITE_SEQUENCER_230 0x30E6 +#define WM8996_WRITE_SEQUENCER_231 0x30E7 +#define WM8996_WRITE_SEQUENCER_232 0x30E8 +#define WM8996_WRITE_SEQUENCER_233 0x30E9 +#define WM8996_WRITE_SEQUENCER_234 0x30EA +#define WM8996_WRITE_SEQUENCER_235 0x30EB +#define WM8996_WRITE_SEQUENCER_236 0x30EC +#define WM8996_WRITE_SEQUENCER_237 0x30ED +#define WM8996_WRITE_SEQUENCER_238 0x30EE +#define WM8996_WRITE_SEQUENCER_239 0x30EF +#define WM8996_WRITE_SEQUENCER_240 0x30F0 +#define WM8996_WRITE_SEQUENCER_241 0x30F1 +#define WM8996_WRITE_SEQUENCER_242 0x30F2 +#define WM8996_WRITE_SEQUENCER_243 0x30F3 +#define WM8996_WRITE_SEQUENCER_244 0x30F4 +#define WM8996_WRITE_SEQUENCER_245 0x30F5 +#define WM8996_WRITE_SEQUENCER_246 0x30F6 +#define WM8996_WRITE_SEQUENCER_247 0x30F7 +#define WM8996_WRITE_SEQUENCER_248 0x30F8 +#define WM8996_WRITE_SEQUENCER_249 0x30F9 +#define WM8996_WRITE_SEQUENCER_250 0x30FA +#define WM8996_WRITE_SEQUENCER_251 0x30FB +#define WM8996_WRITE_SEQUENCER_252 0x30FC +#define WM8996_WRITE_SEQUENCER_253 0x30FD +#define WM8996_WRITE_SEQUENCER_254 0x30FE +#define WM8996_WRITE_SEQUENCER_255 0x30FF +#define WM8996_WRITE_SEQUENCER_256 0x3100 +#define WM8996_WRITE_SEQUENCER_257 0x3101 +#define WM8996_WRITE_SEQUENCER_258 0x3102 +#define WM8996_WRITE_SEQUENCER_259 0x3103 +#define WM8996_WRITE_SEQUENCER_260 0x3104 +#define WM8996_WRITE_SEQUENCER_261 0x3105 +#define WM8996_WRITE_SEQUENCER_262 0x3106 +#define WM8996_WRITE_SEQUENCER_263 0x3107 +#define WM8996_WRITE_SEQUENCER_264 0x3108 +#define WM8996_WRITE_SEQUENCER_265 0x3109 +#define WM8996_WRITE_SEQUENCER_266 0x310A +#define WM8996_WRITE_SEQUENCER_267 0x310B +#define WM8996_WRITE_SEQUENCER_268 0x310C +#define WM8996_WRITE_SEQUENCER_269 0x310D +#define WM8996_WRITE_SEQUENCER_270 0x310E +#define WM8996_WRITE_SEQUENCER_271 0x310F +#define WM8996_WRITE_SEQUENCER_272 0x3110 +#define WM8996_WRITE_SEQUENCER_273 0x3111 +#define WM8996_WRITE_SEQUENCER_274 0x3112 +#define WM8996_WRITE_SEQUENCER_275 0x3113 +#define WM8996_WRITE_SEQUENCER_276 0x3114 +#define WM8996_WRITE_SEQUENCER_277 0x3115 +#define WM8996_WRITE_SEQUENCER_278 0x3116 +#define WM8996_WRITE_SEQUENCER_279 0x3117 +#define WM8996_WRITE_SEQUENCER_280 0x3118 +#define WM8996_WRITE_SEQUENCER_281 0x3119 +#define WM8996_WRITE_SEQUENCER_282 0x311A +#define WM8996_WRITE_SEQUENCER_283 0x311B +#define WM8996_WRITE_SEQUENCER_284 0x311C +#define WM8996_WRITE_SEQUENCER_285 0x311D +#define WM8996_WRITE_SEQUENCER_286 0x311E +#define WM8996_WRITE_SEQUENCER_287 0x311F +#define WM8996_WRITE_SEQUENCER_288 0x3120 +#define WM8996_WRITE_SEQUENCER_289 0x3121 +#define WM8996_WRITE_SEQUENCER_290 0x3122 +#define WM8996_WRITE_SEQUENCER_291 0x3123 +#define WM8996_WRITE_SEQUENCER_292 0x3124 +#define WM8996_WRITE_SEQUENCER_293 0x3125 +#define WM8996_WRITE_SEQUENCER_294 0x3126 +#define WM8996_WRITE_SEQUENCER_295 0x3127 +#define WM8996_WRITE_SEQUENCER_296 0x3128 +#define WM8996_WRITE_SEQUENCER_297 0x3129 +#define WM8996_WRITE_SEQUENCER_298 0x312A +#define WM8996_WRITE_SEQUENCER_299 0x312B +#define WM8996_WRITE_SEQUENCER_300 0x312C +#define WM8996_WRITE_SEQUENCER_301 0x312D +#define WM8996_WRITE_SEQUENCER_302 0x312E +#define WM8996_WRITE_SEQUENCER_303 
0x312F +#define WM8996_WRITE_SEQUENCER_304 0x3130 +#define WM8996_WRITE_SEQUENCER_305 0x3131 +#define WM8996_WRITE_SEQUENCER_306 0x3132 +#define WM8996_WRITE_SEQUENCER_307 0x3133 +#define WM8996_WRITE_SEQUENCER_308 0x3134 +#define WM8996_WRITE_SEQUENCER_309 0x3135 +#define WM8996_WRITE_SEQUENCER_310 0x3136 +#define WM8996_WRITE_SEQUENCER_311 0x3137 +#define WM8996_WRITE_SEQUENCER_312 0x3138 +#define WM8996_WRITE_SEQUENCER_313 0x3139 +#define WM8996_WRITE_SEQUENCER_314 0x313A +#define WM8996_WRITE_SEQUENCER_315 0x313B +#define WM8996_WRITE_SEQUENCER_316 0x313C +#define WM8996_WRITE_SEQUENCER_317 0x313D +#define WM8996_WRITE_SEQUENCER_318 0x313E +#define WM8996_WRITE_SEQUENCER_319 0x313F +#define WM8996_WRITE_SEQUENCER_320 0x3140 +#define WM8996_WRITE_SEQUENCER_321 0x3141 +#define WM8996_WRITE_SEQUENCER_322 0x3142 +#define WM8996_WRITE_SEQUENCER_323 0x3143 +#define WM8996_WRITE_SEQUENCER_324 0x3144 +#define WM8996_WRITE_SEQUENCER_325 0x3145 +#define WM8996_WRITE_SEQUENCER_326 0x3146 +#define WM8996_WRITE_SEQUENCER_327 0x3147 +#define WM8996_WRITE_SEQUENCER_328 0x3148 +#define WM8996_WRITE_SEQUENCER_329 0x3149 +#define WM8996_WRITE_SEQUENCER_330 0x314A +#define WM8996_WRITE_SEQUENCER_331 0x314B +#define WM8996_WRITE_SEQUENCER_332 0x314C +#define WM8996_WRITE_SEQUENCER_333 0x314D +#define WM8996_WRITE_SEQUENCER_334 0x314E +#define WM8996_WRITE_SEQUENCER_335 0x314F +#define WM8996_WRITE_SEQUENCER_336 0x3150 +#define WM8996_WRITE_SEQUENCER_337 0x3151 +#define WM8996_WRITE_SEQUENCER_338 0x3152 +#define WM8996_WRITE_SEQUENCER_339 0x3153 +#define WM8996_WRITE_SEQUENCER_340 0x3154 +#define WM8996_WRITE_SEQUENCER_341 0x3155 +#define WM8996_WRITE_SEQUENCER_342 0x3156 +#define WM8996_WRITE_SEQUENCER_343 0x3157 +#define WM8996_WRITE_SEQUENCER_344 0x3158 +#define WM8996_WRITE_SEQUENCER_345 0x3159 +#define WM8996_WRITE_SEQUENCER_346 0x315A +#define WM8996_WRITE_SEQUENCER_347 0x315B +#define WM8996_WRITE_SEQUENCER_348 0x315C +#define WM8996_WRITE_SEQUENCER_349 0x315D +#define WM8996_WRITE_SEQUENCER_350 0x315E +#define WM8996_WRITE_SEQUENCER_351 0x315F +#define WM8996_WRITE_SEQUENCER_352 0x3160 +#define WM8996_WRITE_SEQUENCER_353 0x3161 +#define WM8996_WRITE_SEQUENCER_354 0x3162 +#define WM8996_WRITE_SEQUENCER_355 0x3163 +#define WM8996_WRITE_SEQUENCER_356 0x3164 +#define WM8996_WRITE_SEQUENCER_357 0x3165 +#define WM8996_WRITE_SEQUENCER_358 0x3166 +#define WM8996_WRITE_SEQUENCER_359 0x3167 +#define WM8996_WRITE_SEQUENCER_360 0x3168 +#define WM8996_WRITE_SEQUENCER_361 0x3169 +#define WM8996_WRITE_SEQUENCER_362 0x316A +#define WM8996_WRITE_SEQUENCER_363 0x316B +#define WM8996_WRITE_SEQUENCER_364 0x316C +#define WM8996_WRITE_SEQUENCER_365 0x316D +#define WM8996_WRITE_SEQUENCER_366 0x316E +#define WM8996_WRITE_SEQUENCER_367 0x316F +#define WM8996_WRITE_SEQUENCER_368 0x3170 +#define WM8996_WRITE_SEQUENCER_369 0x3171 +#define WM8996_WRITE_SEQUENCER_370 0x3172 +#define WM8996_WRITE_SEQUENCER_371 0x3173 +#define WM8996_WRITE_SEQUENCER_372 0x3174 +#define WM8996_WRITE_SEQUENCER_373 0x3175 +#define WM8996_WRITE_SEQUENCER_374 0x3176 +#define WM8996_WRITE_SEQUENCER_375 0x3177 +#define WM8996_WRITE_SEQUENCER_376 0x3178 +#define WM8996_WRITE_SEQUENCER_377 0x3179 +#define WM8996_WRITE_SEQUENCER_378 0x317A +#define WM8996_WRITE_SEQUENCER_379 0x317B +#define WM8996_WRITE_SEQUENCER_380 0x317C +#define WM8996_WRITE_SEQUENCER_381 0x317D +#define WM8996_WRITE_SEQUENCER_382 0x317E +#define WM8996_WRITE_SEQUENCER_383 0x317F +#define WM8996_WRITE_SEQUENCER_384 0x3180 +#define WM8996_WRITE_SEQUENCER_385 0x3181 +#define 
WM8996_WRITE_SEQUENCER_386 0x3182 +#define WM8996_WRITE_SEQUENCER_387 0x3183 +#define WM8996_WRITE_SEQUENCER_388 0x3184 +#define WM8996_WRITE_SEQUENCER_389 0x3185 +#define WM8996_WRITE_SEQUENCER_390 0x3186 +#define WM8996_WRITE_SEQUENCER_391 0x3187 +#define WM8996_WRITE_SEQUENCER_392 0x3188 +#define WM8996_WRITE_SEQUENCER_393 0x3189 +#define WM8996_WRITE_SEQUENCER_394 0x318A +#define WM8996_WRITE_SEQUENCER_395 0x318B +#define WM8996_WRITE_SEQUENCER_396 0x318C +#define WM8996_WRITE_SEQUENCER_397 0x318D +#define WM8996_WRITE_SEQUENCER_398 0x318E +#define WM8996_WRITE_SEQUENCER_399 0x318F +#define WM8996_WRITE_SEQUENCER_400 0x3190 +#define WM8996_WRITE_SEQUENCER_401 0x3191 +#define WM8996_WRITE_SEQUENCER_402 0x3192 +#define WM8996_WRITE_SEQUENCER_403 0x3193 +#define WM8996_WRITE_SEQUENCER_404 0x3194 +#define WM8996_WRITE_SEQUENCER_405 0x3195 +#define WM8996_WRITE_SEQUENCER_406 0x3196 +#define WM8996_WRITE_SEQUENCER_407 0x3197 +#define WM8996_WRITE_SEQUENCER_408 0x3198 +#define WM8996_WRITE_SEQUENCER_409 0x3199 +#define WM8996_WRITE_SEQUENCER_410 0x319A +#define WM8996_WRITE_SEQUENCER_411 0x319B +#define WM8996_WRITE_SEQUENCER_412 0x319C +#define WM8996_WRITE_SEQUENCER_413 0x319D +#define WM8996_WRITE_SEQUENCER_414 0x319E +#define WM8996_WRITE_SEQUENCER_415 0x319F +#define WM8996_WRITE_SEQUENCER_416 0x31A0 +#define WM8996_WRITE_SEQUENCER_417 0x31A1 +#define WM8996_WRITE_SEQUENCER_418 0x31A2 +#define WM8996_WRITE_SEQUENCER_419 0x31A3 +#define WM8996_WRITE_SEQUENCER_420 0x31A4 +#define WM8996_WRITE_SEQUENCER_421 0x31A5 +#define WM8996_WRITE_SEQUENCER_422 0x31A6 +#define WM8996_WRITE_SEQUENCER_423 0x31A7 +#define WM8996_WRITE_SEQUENCER_424 0x31A8 +#define WM8996_WRITE_SEQUENCER_425 0x31A9 +#define WM8996_WRITE_SEQUENCER_426 0x31AA +#define WM8996_WRITE_SEQUENCER_427 0x31AB +#define WM8996_WRITE_SEQUENCER_428 0x31AC +#define WM8996_WRITE_SEQUENCER_429 0x31AD +#define WM8996_WRITE_SEQUENCER_430 0x31AE +#define WM8996_WRITE_SEQUENCER_431 0x31AF +#define WM8996_WRITE_SEQUENCER_432 0x31B0 +#define WM8996_WRITE_SEQUENCER_433 0x31B1 +#define WM8996_WRITE_SEQUENCER_434 0x31B2 +#define WM8996_WRITE_SEQUENCER_435 0x31B3 +#define WM8996_WRITE_SEQUENCER_436 0x31B4 +#define WM8996_WRITE_SEQUENCER_437 0x31B5 +#define WM8996_WRITE_SEQUENCER_438 0x31B6 +#define WM8996_WRITE_SEQUENCER_439 0x31B7 +#define WM8996_WRITE_SEQUENCER_440 0x31B8 +#define WM8996_WRITE_SEQUENCER_441 0x31B9 +#define WM8996_WRITE_SEQUENCER_442 0x31BA +#define WM8996_WRITE_SEQUENCER_443 0x31BB +#define WM8996_WRITE_SEQUENCER_444 0x31BC +#define WM8996_WRITE_SEQUENCER_445 0x31BD +#define WM8996_WRITE_SEQUENCER_446 0x31BE +#define WM8996_WRITE_SEQUENCER_447 0x31BF +#define WM8996_WRITE_SEQUENCER_448 0x31C0 +#define WM8996_WRITE_SEQUENCER_449 0x31C1 +#define WM8996_WRITE_SEQUENCER_450 0x31C2 +#define WM8996_WRITE_SEQUENCER_451 0x31C3 +#define WM8996_WRITE_SEQUENCER_452 0x31C4 +#define WM8996_WRITE_SEQUENCER_453 0x31C5 +#define WM8996_WRITE_SEQUENCER_454 0x31C6 +#define WM8996_WRITE_SEQUENCER_455 0x31C7 +#define WM8996_WRITE_SEQUENCER_456 0x31C8 +#define WM8996_WRITE_SEQUENCER_457 0x31C9 +#define WM8996_WRITE_SEQUENCER_458 0x31CA +#define WM8996_WRITE_SEQUENCER_459 0x31CB +#define WM8996_WRITE_SEQUENCER_460 0x31CC +#define WM8996_WRITE_SEQUENCER_461 0x31CD +#define WM8996_WRITE_SEQUENCER_462 0x31CE +#define WM8996_WRITE_SEQUENCER_463 0x31CF +#define WM8996_WRITE_SEQUENCER_464 0x31D0 +#define WM8996_WRITE_SEQUENCER_465 0x31D1 +#define WM8996_WRITE_SEQUENCER_466 0x31D2 +#define WM8996_WRITE_SEQUENCER_467 0x31D3 +#define WM8996_WRITE_SEQUENCER_468 
+#define WM8996_WRITE_SEQUENCER_469 0x31D5
+#define WM8996_WRITE_SEQUENCER_470 0x31D6
+#define WM8996_WRITE_SEQUENCER_471 0x31D7
+#define WM8996_WRITE_SEQUENCER_472 0x31D8
+#define WM8996_WRITE_SEQUENCER_473 0x31D9
+#define WM8996_WRITE_SEQUENCER_474 0x31DA
+#define WM8996_WRITE_SEQUENCER_475 0x31DB
+#define WM8996_WRITE_SEQUENCER_476 0x31DC
+#define WM8996_WRITE_SEQUENCER_477 0x31DD
+#define WM8996_WRITE_SEQUENCER_478 0x31DE
+#define WM8996_WRITE_SEQUENCER_479 0x31DF
+#define WM8996_WRITE_SEQUENCER_480 0x31E0
+#define WM8996_WRITE_SEQUENCER_481 0x31E1
+#define WM8996_WRITE_SEQUENCER_482 0x31E2
+#define WM8996_WRITE_SEQUENCER_483 0x31E3
+#define WM8996_WRITE_SEQUENCER_484 0x31E4
+#define WM8996_WRITE_SEQUENCER_485 0x31E5
+#define WM8996_WRITE_SEQUENCER_486 0x31E6
+#define WM8996_WRITE_SEQUENCER_487 0x31E7
+#define WM8996_WRITE_SEQUENCER_488 0x31E8
+#define WM8996_WRITE_SEQUENCER_489 0x31E9
+#define WM8996_WRITE_SEQUENCER_490 0x31EA
+#define WM8996_WRITE_SEQUENCER_491 0x31EB
+#define WM8996_WRITE_SEQUENCER_492 0x31EC
+#define WM8996_WRITE_SEQUENCER_493 0x31ED
+#define WM8996_WRITE_SEQUENCER_494 0x31EE
+#define WM8996_WRITE_SEQUENCER_495 0x31EF
+#define WM8996_WRITE_SEQUENCER_496 0x31F0
+#define WM8996_WRITE_SEQUENCER_497 0x31F1
+#define WM8996_WRITE_SEQUENCER_498 0x31F2
+#define WM8996_WRITE_SEQUENCER_499 0x31F3
+#define WM8996_WRITE_SEQUENCER_500 0x31F4
+#define WM8996_WRITE_SEQUENCER_501 0x31F5
+#define WM8996_WRITE_SEQUENCER_502 0x31F6
+#define WM8996_WRITE_SEQUENCER_503 0x31F7
+#define WM8996_WRITE_SEQUENCER_504 0x31F8
+#define WM8996_WRITE_SEQUENCER_505 0x31F9
+#define WM8996_WRITE_SEQUENCER_506 0x31FA
+#define WM8996_WRITE_SEQUENCER_507 0x31FB
+#define WM8996_WRITE_SEQUENCER_508 0x31FC
+#define WM8996_WRITE_SEQUENCER_509 0x31FD
+#define WM8996_WRITE_SEQUENCER_510 0x31FE
+#define WM8996_WRITE_SEQUENCER_511 0x31FF
+
+#define WM8996_REGISTER_COUNT 706
+#define WM8996_MAX_REGISTER 0x31FF
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM8996_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */
+#define WM8996_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */
+#define WM8996_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8996_MICB2_ENA 0x0200 /* MICB2_ENA */
+#define WM8996_MICB2_ENA_MASK 0x0200 /* MICB2_ENA */
+#define WM8996_MICB2_ENA_SHIFT 9 /* MICB2_ENA */
+#define WM8996_MICB2_ENA_WIDTH 1 /* MICB2_ENA */
+#define WM8996_MICB1_ENA 0x0100 /* MICB1_ENA */
+#define WM8996_MICB1_ENA_MASK 0x0100 /* MICB1_ENA */
+#define WM8996_MICB1_ENA_SHIFT 8 /* MICB1_ENA */
+#define WM8996_MICB1_ENA_WIDTH 1 /* MICB1_ENA */
+#define WM8996_HPOUT2L_ENA 0x0080 /* HPOUT2L_ENA */
+#define WM8996_HPOUT2L_ENA_MASK 0x0080 /* HPOUT2L_ENA */
+#define WM8996_HPOUT2L_ENA_SHIFT 7 /* HPOUT2L_ENA */
+#define WM8996_HPOUT2L_ENA_WIDTH 1 /* HPOUT2L_ENA */
+#define WM8996_HPOUT2R_ENA 0x0040 /* HPOUT2R_ENA */
+#define WM8996_HPOUT2R_ENA_MASK 0x0040 /* HPOUT2R_ENA */
+#define WM8996_HPOUT2R_ENA_SHIFT 6 /* HPOUT2R_ENA */
+#define WM8996_HPOUT2R_ENA_WIDTH 1 /* HPOUT2R_ENA */
+#define WM8996_HPOUT1L_ENA 0x0020 /* HPOUT1L_ENA */
+#define WM8996_HPOUT1L_ENA_MASK 0x0020 /* HPOUT1L_ENA */
+#define WM8996_HPOUT1L_ENA_SHIFT 5 /* HPOUT1L_ENA */
+#define WM8996_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */
+#define WM8996_HPOUT1R_ENA 0x0010 /* HPOUT1R_ENA */
+#define WM8996_HPOUT1R_ENA_MASK 0x0010 /* HPOUT1R_ENA */
+#define WM8996_HPOUT1R_ENA_SHIFT 4 /* HPOUT1R_ENA */
+#define WM8996_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */
+#define WM8996_BG_ENA 0x0001 /* BG_ENA */
+#define WM8996_BG_ENA_MASK 0x0001 /* BG_ENA */
+#define WM8996_BG_ENA_SHIFT 0 /* BG_ENA */
+#define WM8996_BG_ENA_WIDTH 1 /* BG_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8996_OPCLK_ENA 0x0800 /* OPCLK_ENA */
+#define WM8996_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */
+#define WM8996_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */
+#define WM8996_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define WM8996_INL_ENA 0x0020 /* INL_ENA */
+#define WM8996_INL_ENA_MASK 0x0020 /* INL_ENA */
+#define WM8996_INL_ENA_SHIFT 5 /* INL_ENA */
+#define WM8996_INL_ENA_WIDTH 1 /* INL_ENA */
+#define WM8996_INR_ENA 0x0010 /* INR_ENA */
+#define WM8996_INR_ENA_MASK 0x0010 /* INR_ENA */
+#define WM8996_INR_ENA_SHIFT 4 /* INR_ENA */
+#define WM8996_INR_ENA_WIDTH 1 /* INR_ENA */
+#define WM8996_LDO2_ENA 0x0002 /* LDO2_ENA */
+#define WM8996_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */
+#define WM8996_LDO2_ENA_SHIFT 1 /* LDO2_ENA */
+#define WM8996_LDO2_ENA_WIDTH 1 /* LDO2_ENA */
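
Every field below follows the same four-macro convention: the bit value itself, a _MASK covering the field, the _SHIFT of its least significant bit, and the _WIDTH in bits. A minimal sketch of the read-modify-write pattern this supports; wm8996_read()/wm8996_write() are hypothetical accessors standing in for whatever register I/O the driver uses, and the WM8996_POWER_MANAGEMENT_1 register index is assumed from the register map earlier in this header:

	unsigned int val;

	/* Enable both HPOUT1 channels without disturbing other bits */
	val = wm8996_read(codec, WM8996_POWER_MANAGEMENT_1);
	val &= ~(WM8996_HPOUT1L_ENA_MASK | WM8996_HPOUT1R_ENA_MASK);
	val |= WM8996_HPOUT1L_ENA | WM8996_HPOUT1R_ENA;
	wm8996_write(codec, WM8996_POWER_MANAGEMENT_1, val);
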
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8996_DSP2RXL_ENA 0x0800 /* DSP2RXL_ENA */
+#define WM8996_DSP2RXL_ENA_MASK 0x0800 /* DSP2RXL_ENA */
+#define WM8996_DSP2RXL_ENA_SHIFT 11 /* DSP2RXL_ENA */
+#define WM8996_DSP2RXL_ENA_WIDTH 1 /* DSP2RXL_ENA */
+#define WM8996_DSP2RXR_ENA 0x0400 /* DSP2RXR_ENA */
+#define WM8996_DSP2RXR_ENA_MASK 0x0400 /* DSP2RXR_ENA */
+#define WM8996_DSP2RXR_ENA_SHIFT 10 /* DSP2RXR_ENA */
+#define WM8996_DSP2RXR_ENA_WIDTH 1 /* DSP2RXR_ENA */
+#define WM8996_DSP1RXL_ENA 0x0200 /* DSP1RXL_ENA */
+#define WM8996_DSP1RXL_ENA_MASK 0x0200 /* DSP1RXL_ENA */
+#define WM8996_DSP1RXL_ENA_SHIFT 9 /* DSP1RXL_ENA */
+#define WM8996_DSP1RXL_ENA_WIDTH 1 /* DSP1RXL_ENA */
+#define WM8996_DSP1RXR_ENA 0x0100 /* DSP1RXR_ENA */
+#define WM8996_DSP1RXR_ENA_MASK 0x0100 /* DSP1RXR_ENA */
+#define WM8996_DSP1RXR_ENA_SHIFT 8 /* DSP1RXR_ENA */
+#define WM8996_DSP1RXR_ENA_WIDTH 1 /* DSP1RXR_ENA */
+#define WM8996_DMIC2L_ENA 0x0020 /* DMIC2L_ENA */
+#define WM8996_DMIC2L_ENA_MASK 0x0020 /* DMIC2L_ENA */
+#define WM8996_DMIC2L_ENA_SHIFT 5 /* DMIC2L_ENA */
+#define WM8996_DMIC2L_ENA_WIDTH 1 /* DMIC2L_ENA */
+#define WM8996_DMIC2R_ENA 0x0010 /* DMIC2R_ENA */
+#define WM8996_DMIC2R_ENA_MASK 0x0010 /* DMIC2R_ENA */
+#define WM8996_DMIC2R_ENA_SHIFT 4 /* DMIC2R_ENA */
+#define WM8996_DMIC2R_ENA_WIDTH 1 /* DMIC2R_ENA */
+#define WM8996_DMIC1L_ENA 0x0008 /* DMIC1L_ENA */
+#define WM8996_DMIC1L_ENA_MASK 0x0008 /* DMIC1L_ENA */
+#define WM8996_DMIC1L_ENA_SHIFT 3 /* DMIC1L_ENA */
+#define WM8996_DMIC1L_ENA_WIDTH 1 /* DMIC1L_ENA */
+#define WM8996_DMIC1R_ENA 0x0004 /* DMIC1R_ENA */
+#define WM8996_DMIC1R_ENA_MASK 0x0004 /* DMIC1R_ENA */
+#define WM8996_DMIC1R_ENA_SHIFT 2 /* DMIC1R_ENA */
+#define WM8996_DMIC1R_ENA_WIDTH 1 /* DMIC1R_ENA */
+#define WM8996_ADCL_ENA 0x0002 /* ADCL_ENA */
+#define WM8996_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */
+#define WM8996_ADCL_ENA_SHIFT 1 /* ADCL_ENA */
+#define WM8996_ADCL_ENA_WIDTH 1 /* ADCL_ENA */
+#define WM8996_ADCR_ENA 0x0001 /* ADCR_ENA */
+#define WM8996_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */
+#define WM8996_ADCR_ENA_SHIFT 0 /* ADCR_ENA */
+#define WM8996_ADCR_ENA_WIDTH 1 /* ADCR_ENA */
+
+/*
+ * R4 (0x04) - Power Management (4)
+ */
+#define WM8996_AIF2RX_CHAN1_ENA 0x0200 /* AIF2RX_CHAN1_ENA */
+#define WM8996_AIF2RX_CHAN1_ENA_MASK 0x0200 /* AIF2RX_CHAN1_ENA */
+#define WM8996_AIF2RX_CHAN1_ENA_SHIFT 9 /* AIF2RX_CHAN1_ENA */
+#define WM8996_AIF2RX_CHAN1_ENA_WIDTH 1 /* AIF2RX_CHAN1_ENA */
+#define WM8996_AIF2RX_CHAN0_ENA 0x0100 /* AIF2RX_CHAN0_ENA */
+#define WM8996_AIF2RX_CHAN0_ENA_MASK 0x0100 /* AIF2RX_CHAN0_ENA */
+#define WM8996_AIF2RX_CHAN0_ENA_SHIFT 8 /* AIF2RX_CHAN0_ENA */
+#define WM8996_AIF2RX_CHAN0_ENA_WIDTH 1 /* AIF2RX_CHAN0_ENA */
+#define WM8996_AIF1RX_CHAN5_ENA 0x0020 /* AIF1RX_CHAN5_ENA */
+#define WM8996_AIF1RX_CHAN5_ENA_MASK 0x0020 /* AIF1RX_CHAN5_ENA */
+#define WM8996_AIF1RX_CHAN5_ENA_SHIFT 5 /* AIF1RX_CHAN5_ENA */
+#define WM8996_AIF1RX_CHAN5_ENA_WIDTH 1 /* AIF1RX_CHAN5_ENA */
+#define WM8996_AIF1RX_CHAN4_ENA 0x0010 /* AIF1RX_CHAN4_ENA */
+#define WM8996_AIF1RX_CHAN4_ENA_MASK 0x0010 /* AIF1RX_CHAN4_ENA */
+#define WM8996_AIF1RX_CHAN4_ENA_SHIFT 4 /* AIF1RX_CHAN4_ENA */
+#define WM8996_AIF1RX_CHAN4_ENA_WIDTH 1 /* AIF1RX_CHAN4_ENA */
+#define WM8996_AIF1RX_CHAN3_ENA 0x0008 /* AIF1RX_CHAN3_ENA */
+#define WM8996_AIF1RX_CHAN3_ENA_MASK 0x0008 /* AIF1RX_CHAN3_ENA */
+#define WM8996_AIF1RX_CHAN3_ENA_SHIFT 3 /* AIF1RX_CHAN3_ENA */
+#define WM8996_AIF1RX_CHAN3_ENA_WIDTH 1 /* AIF1RX_CHAN3_ENA */
+#define WM8996_AIF1RX_CHAN2_ENA 0x0004 /* AIF1RX_CHAN2_ENA */
+#define WM8996_AIF1RX_CHAN2_ENA_MASK 0x0004 /* AIF1RX_CHAN2_ENA */
+#define WM8996_AIF1RX_CHAN2_ENA_SHIFT 2 /* AIF1RX_CHAN2_ENA */
+#define WM8996_AIF1RX_CHAN2_ENA_WIDTH 1 /* AIF1RX_CHAN2_ENA */
+#define WM8996_AIF1RX_CHAN1_ENA 0x0002 /* AIF1RX_CHAN1_ENA */
+#define WM8996_AIF1RX_CHAN1_ENA_MASK 0x0002 /* AIF1RX_CHAN1_ENA */
+#define WM8996_AIF1RX_CHAN1_ENA_SHIFT 1 /* AIF1RX_CHAN1_ENA */
+#define WM8996_AIF1RX_CHAN1_ENA_WIDTH 1 /* AIF1RX_CHAN1_ENA */
+#define WM8996_AIF1RX_CHAN0_ENA 0x0001 /* AIF1RX_CHAN0_ENA */
+#define WM8996_AIF1RX_CHAN0_ENA_MASK 0x0001 /* AIF1RX_CHAN0_ENA */
+#define WM8996_AIF1RX_CHAN0_ENA_SHIFT 0 /* AIF1RX_CHAN0_ENA */
+#define WM8996_AIF1RX_CHAN0_ENA_WIDTH 1 /* AIF1RX_CHAN0_ENA */
+
+/*
+ * R5 (0x05) - Power Management (5)
+ */
+#define WM8996_DSP2TXL_ENA 0x0800 /* DSP2TXL_ENA */
+#define WM8996_DSP2TXL_ENA_MASK 0x0800 /* DSP2TXL_ENA */
+#define WM8996_DSP2TXL_ENA_SHIFT 11 /* DSP2TXL_ENA */
+#define WM8996_DSP2TXL_ENA_WIDTH 1 /* DSP2TXL_ENA */
+#define WM8996_DSP2TXR_ENA 0x0400 /* DSP2TXR_ENA */
+#define WM8996_DSP2TXR_ENA_MASK 0x0400 /* DSP2TXR_ENA */
+#define WM8996_DSP2TXR_ENA_SHIFT 10 /* DSP2TXR_ENA */
+#define WM8996_DSP2TXR_ENA_WIDTH 1 /* DSP2TXR_ENA */
+#define WM8996_DSP1TXL_ENA 0x0200 /* DSP1TXL_ENA */
+#define WM8996_DSP1TXL_ENA_MASK 0x0200 /* DSP1TXL_ENA */
+#define WM8996_DSP1TXL_ENA_SHIFT 9 /* DSP1TXL_ENA */
+#define WM8996_DSP1TXL_ENA_WIDTH 1 /* DSP1TXL_ENA */
+#define WM8996_DSP1TXR_ENA 0x0100 /* DSP1TXR_ENA */
+#define WM8996_DSP1TXR_ENA_MASK 0x0100 /* DSP1TXR_ENA */
+#define WM8996_DSP1TXR_ENA_SHIFT 8 /* DSP1TXR_ENA */
+#define WM8996_DSP1TXR_ENA_WIDTH 1 /* DSP1TXR_ENA */
+#define WM8996_DAC2L_ENA 0x0008 /* DAC2L_ENA */
+#define WM8996_DAC2L_ENA_MASK 0x0008 /* DAC2L_ENA */
+#define WM8996_DAC2L_ENA_SHIFT 3 /* DAC2L_ENA */
+#define WM8996_DAC2L_ENA_WIDTH 1 /* DAC2L_ENA */
+#define WM8996_DAC2R_ENA 0x0004 /* DAC2R_ENA */
+#define WM8996_DAC2R_ENA_MASK 0x0004 /* DAC2R_ENA */
+#define WM8996_DAC2R_ENA_SHIFT 2 /* DAC2R_ENA */
+#define WM8996_DAC2R_ENA_WIDTH 1 /* DAC2R_ENA */
+#define WM8996_DAC1L_ENA 0x0002 /* DAC1L_ENA */
+#define WM8996_DAC1L_ENA_MASK 0x0002 /* DAC1L_ENA */
+#define WM8996_DAC1L_ENA_SHIFT 1 /* DAC1L_ENA */
+#define WM8996_DAC1L_ENA_WIDTH 1 /* DAC1L_ENA */
+#define WM8996_DAC1R_ENA 0x0001 /* DAC1R_ENA */
+#define WM8996_DAC1R_ENA_MASK 0x0001 /* DAC1R_ENA */
+#define WM8996_DAC1R_ENA_SHIFT 0 /* DAC1R_ENA */
+#define WM8996_DAC1R_ENA_WIDTH 1 /* DAC1R_ENA */
+
+/*
+ * R6 (0x06) - Power Management (6)
+ */
+#define WM8996_AIF2TX_CHAN1_ENA 0x0200 /* AIF2TX_CHAN1_ENA */
+#define WM8996_AIF2TX_CHAN1_ENA_MASK 0x0200 /* AIF2TX_CHAN1_ENA */
+#define WM8996_AIF2TX_CHAN1_ENA_SHIFT 9 /* AIF2TX_CHAN1_ENA */
+#define WM8996_AIF2TX_CHAN1_ENA_WIDTH 1 /* AIF2TX_CHAN1_ENA */
+#define WM8996_AIF2TX_CHAN0_ENA 0x0100 /* AIF2TX_CHAN0_ENA */
+#define WM8996_AIF2TX_CHAN0_ENA_MASK 0x0100 /* AIF2TX_CHAN0_ENA */
+#define WM8996_AIF2TX_CHAN0_ENA_SHIFT 8 /* AIF2TX_CHAN0_ENA */
+#define WM8996_AIF2TX_CHAN0_ENA_WIDTH 1 /* AIF2TX_CHAN0_ENA */
+#define WM8996_AIF1TX_CHAN5_ENA 0x0020 /* AIF1TX_CHAN5_ENA */
+#define WM8996_AIF1TX_CHAN5_ENA_MASK 0x0020 /* AIF1TX_CHAN5_ENA */
+#define WM8996_AIF1TX_CHAN5_ENA_SHIFT 5 /* AIF1TX_CHAN5_ENA */
+#define WM8996_AIF1TX_CHAN5_ENA_WIDTH 1 /* AIF1TX_CHAN5_ENA */
+#define WM8996_AIF1TX_CHAN4_ENA 0x0010 /* AIF1TX_CHAN4_ENA */
+#define WM8996_AIF1TX_CHAN4_ENA_MASK 0x0010 /* AIF1TX_CHAN4_ENA */
+#define WM8996_AIF1TX_CHAN4_ENA_SHIFT 4 /* AIF1TX_CHAN4_ENA */
+#define WM8996_AIF1TX_CHAN4_ENA_WIDTH 1 /* AIF1TX_CHAN4_ENA */
+#define WM8996_AIF1TX_CHAN3_ENA 0x0008 /* AIF1TX_CHAN3_ENA */
+#define WM8996_AIF1TX_CHAN3_ENA_MASK 0x0008 /* AIF1TX_CHAN3_ENA */
+#define WM8996_AIF1TX_CHAN3_ENA_SHIFT 3 /* AIF1TX_CHAN3_ENA */
+#define WM8996_AIF1TX_CHAN3_ENA_WIDTH 1 /* AIF1TX_CHAN3_ENA */
+#define WM8996_AIF1TX_CHAN2_ENA 0x0004 /* AIF1TX_CHAN2_ENA */
+#define WM8996_AIF1TX_CHAN2_ENA_MASK 0x0004 /* AIF1TX_CHAN2_ENA */
+#define WM8996_AIF1TX_CHAN2_ENA_SHIFT 2 /* AIF1TX_CHAN2_ENA */
+#define WM8996_AIF1TX_CHAN2_ENA_WIDTH 1 /* AIF1TX_CHAN2_ENA */
+#define WM8996_AIF1TX_CHAN1_ENA 0x0002 /* AIF1TX_CHAN1_ENA */
+#define WM8996_AIF1TX_CHAN1_ENA_MASK 0x0002 /* AIF1TX_CHAN1_ENA */
+#define WM8996_AIF1TX_CHAN1_ENA_SHIFT 1 /* AIF1TX_CHAN1_ENA */
+#define WM8996_AIF1TX_CHAN1_ENA_WIDTH 1 /* AIF1TX_CHAN1_ENA */
+#define WM8996_AIF1TX_CHAN0_ENA 0x0001 /* AIF1TX_CHAN0_ENA */
+#define WM8996_AIF1TX_CHAN0_ENA_MASK 0x0001 /* AIF1TX_CHAN0_ENA */
+#define WM8996_AIF1TX_CHAN0_ENA_SHIFT 0 /* AIF1TX_CHAN0_ENA */
+#define WM8996_AIF1TX_CHAN0_ENA_WIDTH 1 /* AIF1TX_CHAN0_ENA */
+
+/*
+ * R7 (0x07) - Power Management (7)
+ */
+#define WM8996_DMIC2_FN 0x0200 /* DMIC2_FN */
+#define WM8996_DMIC2_FN_MASK 0x0200 /* DMIC2_FN */
+#define WM8996_DMIC2_FN_SHIFT 9 /* DMIC2_FN */
+#define WM8996_DMIC2_FN_WIDTH 1 /* DMIC2_FN */
+#define WM8996_DMIC1_FN 0x0100 /* DMIC1_FN */
+#define WM8996_DMIC1_FN_MASK 0x0100 /* DMIC1_FN */
+#define WM8996_DMIC1_FN_SHIFT 8 /* DMIC1_FN */
+#define WM8996_DMIC1_FN_WIDTH 1 /* DMIC1_FN */
+#define WM8996_ADC_DMIC_DSP2R_ENA 0x0080 /* ADC_DMIC_DSP2R_ENA */
+#define WM8996_ADC_DMIC_DSP2R_ENA_MASK 0x0080 /* ADC_DMIC_DSP2R_ENA */
+#define WM8996_ADC_DMIC_DSP2R_ENA_SHIFT 7 /* ADC_DMIC_DSP2R_ENA */
+#define WM8996_ADC_DMIC_DSP2R_ENA_WIDTH 1 /* ADC_DMIC_DSP2R_ENA */
+#define WM8996_ADC_DMIC_DSP2L_ENA 0x0040 /* ADC_DMIC_DSP2L_ENA */
+#define WM8996_ADC_DMIC_DSP2L_ENA_MASK 0x0040 /* ADC_DMIC_DSP2L_ENA */
+#define WM8996_ADC_DMIC_DSP2L_ENA_SHIFT 6 /* ADC_DMIC_DSP2L_ENA */
+#define WM8996_ADC_DMIC_DSP2L_ENA_WIDTH 1 /* ADC_DMIC_DSP2L_ENA */
+#define WM8996_ADC_DMIC_SRC2_MASK 0x0030 /* ADC_DMIC_SRC2 - [5:4] */
+#define WM8996_ADC_DMIC_SRC2_SHIFT 4 /* ADC_DMIC_SRC2 - [5:4] */
+#define WM8996_ADC_DMIC_SRC2_WIDTH 2 /* ADC_DMIC_SRC2 - [5:4] */
+#define WM8996_ADC_DMIC_DSP1R_ENA 0x0008 /* ADC_DMIC_DSP1R_ENA */
+#define WM8996_ADC_DMIC_DSP1R_ENA_MASK 0x0008 /* ADC_DMIC_DSP1R_ENA */
+#define WM8996_ADC_DMIC_DSP1R_ENA_SHIFT 3 /* ADC_DMIC_DSP1R_ENA */
+#define WM8996_ADC_DMIC_DSP1R_ENA_WIDTH 1 /* ADC_DMIC_DSP1R_ENA */
+#define WM8996_ADC_DMIC_DSP1L_ENA 0x0004 /* ADC_DMIC_DSP1L_ENA */
+#define WM8996_ADC_DMIC_DSP1L_ENA_MASK 0x0004 /* ADC_DMIC_DSP1L_ENA */
+#define WM8996_ADC_DMIC_DSP1L_ENA_SHIFT 2 /* ADC_DMIC_DSP1L_ENA */
+#define WM8996_ADC_DMIC_DSP1L_ENA_WIDTH 1 /* ADC_DMIC_DSP1L_ENA */
+#define WM8996_ADC_DMIC_SRC1_MASK 0x0003 /* ADC_DMIC_SRC1 - [1:0] */
+#define WM8996_ADC_DMIC_SRC1_SHIFT 0 /* ADC_DMIC_SRC1 - [1:0] */
+#define WM8996_ADC_DMIC_SRC1_WIDTH 2 /* ADC_DMIC_SRC1 - [1:0] */
+
+/*
+ * R8 (0x08) - Power Management (8)
+ */
+#define WM8996_AIF2TX_SRC_MASK 0x00C0 /* AIF2TX_SRC - [7:6] */
+#define WM8996_AIF2TX_SRC_SHIFT 6 /* AIF2TX_SRC - [7:6] */
+#define WM8996_AIF2TX_SRC_WIDTH 2 /* AIF2TX_SRC - [7:6] */
+#define WM8996_DSP2RX_SRC 0x0010 /* DSP2RX_SRC */
+#define WM8996_DSP2RX_SRC_MASK 0x0010 /* DSP2RX_SRC */
+#define WM8996_DSP2RX_SRC_SHIFT 4 /* DSP2RX_SRC */
+#define WM8996_DSP2RX_SRC_WIDTH 1 /* DSP2RX_SRC */
+#define WM8996_DSP1RX_SRC 0x0001 /* DSP1RX_SRC */
+#define WM8996_DSP1RX_SRC_MASK 0x0001 /* DSP1RX_SRC */
+#define WM8996_DSP1RX_SRC_SHIFT 0 /* DSP1RX_SRC */
+#define WM8996_DSP1RX_SRC_WIDTH 1 /* DSP1RX_SRC */
+
+/*
+ * R16 (0x10) - Left Line Input Volume
+ */
+#define WM8996_IN1_VU 0x0080 /* IN1_VU */
+#define WM8996_IN1_VU_MASK 0x0080 /* IN1_VU */
+#define WM8996_IN1_VU_SHIFT 7 /* IN1_VU */
+#define WM8996_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM8996_IN1L_ZC 0x0020 /* IN1L_ZC */
+#define WM8996_IN1L_ZC_MASK 0x0020 /* IN1L_ZC */
+#define WM8996_IN1L_ZC_SHIFT 5 /* IN1L_ZC */
+#define WM8996_IN1L_ZC_WIDTH 1 /* IN1L_ZC */
+#define WM8996_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */
+#define WM8996_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */
+#define WM8996_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */
+
+/*
+ * R17 (0x11) - Right Line Input Volume
+ */
+#define WM8996_IN1_VU 0x0080 /* IN1_VU */
+#define WM8996_IN1_VU_MASK 0x0080 /* IN1_VU */
+#define WM8996_IN1_VU_SHIFT 7 /* IN1_VU */
+#define WM8996_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM8996_IN1R_ZC 0x0020 /* IN1R_ZC */
+#define WM8996_IN1R_ZC_MASK 0x0020 /* IN1R_ZC */
+#define WM8996_IN1R_ZC_SHIFT 5 /* IN1R_ZC */
+#define WM8996_IN1R_ZC_WIDTH 1 /* IN1R_ZC */
+#define WM8996_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */
+#define WM8996_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */
+#define WM8996_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */
+
+/*
+ * R18 (0x12) - Line Input Control
+ */
+#define WM8996_INL_MODE_MASK 0x000C /* INL_MODE - [3:2] */
+#define WM8996_INL_MODE_SHIFT 2 /* INL_MODE - [3:2] */
+#define WM8996_INL_MODE_WIDTH 2 /* INL_MODE - [3:2] */
+#define WM8996_INR_MODE_MASK 0x0003 /* INR_MODE - [1:0] */
+#define WM8996_INR_MODE_SHIFT 0 /* INR_MODE - [1:0] */
+#define WM8996_INR_MODE_WIDTH 2 /* INR_MODE - [1:0] */
+
+/*
+ * R21 (0x15) - DAC1 HPOUT1 Volume
+ */
+#define WM8996_DAC1R_HPOUT1R_VOL_MASK 0x00F0 /* DAC1R_HPOUT1R_VOL - [7:4] */
+#define WM8996_DAC1R_HPOUT1R_VOL_SHIFT 4 /* DAC1R_HPOUT1R_VOL - [7:4] */
+#define WM8996_DAC1R_HPOUT1R_VOL_WIDTH 4 /* DAC1R_HPOUT1R_VOL - [7:4] */
+#define WM8996_DAC1L_HPOUT1L_VOL_MASK 0x000F /* DAC1L_HPOUT1L_VOL - [3:0] */
+#define WM8996_DAC1L_HPOUT1L_VOL_SHIFT 0 /* DAC1L_HPOUT1L_VOL - [3:0] */
+#define WM8996_DAC1L_HPOUT1L_VOL_WIDTH 4 /* DAC1L_HPOUT1L_VOL - [3:0] */
+
+/*
+ * R22 (0x16) - DAC2 HPOUT2 Volume
+ */
+#define WM8996_DAC2R_HPOUT2R_VOL_MASK 0x00F0 /* DAC2R_HPOUT2R_VOL - [7:4] */
+#define WM8996_DAC2R_HPOUT2R_VOL_SHIFT 4 /* DAC2R_HPOUT2R_VOL - [7:4] */
+#define WM8996_DAC2R_HPOUT2R_VOL_WIDTH 4 /* DAC2R_HPOUT2R_VOL - [7:4] */
+#define WM8996_DAC2L_HPOUT2L_VOL_MASK 0x000F /* DAC2L_HPOUT2L_VOL - [3:0] */
+#define WM8996_DAC2L_HPOUT2L_VOL_SHIFT 0 /* DAC2L_HPOUT2L_VOL - [3:0] */
+#define WM8996_DAC2L_HPOUT2L_VOL_WIDTH 4 /* DAC2L_HPOUT2L_VOL - [3:0] */
+
+/*
+ * R24 (0x18) - DAC1 Left Volume
+ */
+#define WM8996_DAC1L_MUTE 0x0200 /* DAC1L_MUTE */
+#define WM8996_DAC1L_MUTE_MASK 0x0200 /* DAC1L_MUTE */
+#define WM8996_DAC1L_MUTE_SHIFT 9 /* DAC1L_MUTE */
+#define WM8996_DAC1L_MUTE_WIDTH 1 /* DAC1L_MUTE */
+#define WM8996_DAC1_VU 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_MASK 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_SHIFT 8 /* DAC1_VU */
+#define WM8996_DAC1_VU_WIDTH 1 /* DAC1_VU */
+#define WM8996_DAC1L_VOL_MASK 0x00FF /* DAC1L_VOL - [7:0] */
+#define WM8996_DAC1L_VOL_SHIFT 0 /* DAC1L_VOL - [7:0] */
+#define WM8996_DAC1L_VOL_WIDTH 8 /* DAC1L_VOL - [7:0] */
+
+/*
+ * R25 (0x19) - DAC1 Right Volume
+ */
+#define WM8996_DAC1R_MUTE 0x0200 /* DAC1R_MUTE */
+#define WM8996_DAC1R_MUTE_MASK 0x0200 /* DAC1R_MUTE */
+#define WM8996_DAC1R_MUTE_SHIFT 9 /* DAC1R_MUTE */
+#define WM8996_DAC1R_MUTE_WIDTH 1 /* DAC1R_MUTE */
+#define WM8996_DAC1_VU 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_MASK 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_SHIFT 8 /* DAC1_VU */
+#define WM8996_DAC1_VU_WIDTH 1 /* DAC1_VU */
+#define WM8996_DAC1R_VOL_MASK 0x00FF /* DAC1R_VOL - [7:0] */
+#define WM8996_DAC1R_VOL_SHIFT 0 /* DAC1R_VOL - [7:0] */
+#define WM8996_DAC1R_VOL_WIDTH 8 /* DAC1R_VOL - [7:0] */
+
+/*
+ * R26 (0x1A) - DAC2 Left Volume
+ */
+#define WM8996_DAC2L_MUTE 0x0200 /* DAC2L_MUTE */
+#define WM8996_DAC2L_MUTE_MASK 0x0200 /* DAC2L_MUTE */
+#define WM8996_DAC2L_MUTE_SHIFT 9 /* DAC2L_MUTE */
+#define WM8996_DAC2L_MUTE_WIDTH 1 /* DAC2L_MUTE */
+#define WM8996_DAC2_VU 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_MASK 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_SHIFT 8 /* DAC2_VU */
+#define WM8996_DAC2_VU_WIDTH 1 /* DAC2_VU */
+#define WM8996_DAC2L_VOL_MASK 0x00FF /* DAC2L_VOL - [7:0] */
+#define WM8996_DAC2L_VOL_SHIFT 0 /* DAC2L_VOL - [7:0] */
+#define WM8996_DAC2L_VOL_WIDTH 8 /* DAC2L_VOL - [7:0] */
+
+/*
+ * R27 (0x1B) - DAC2 Right Volume
+ */
+#define WM8996_DAC2R_MUTE 0x0200 /* DAC2R_MUTE */
+#define WM8996_DAC2R_MUTE_MASK 0x0200 /* DAC2R_MUTE */
+#define WM8996_DAC2R_MUTE_SHIFT 9 /* DAC2R_MUTE */
+#define WM8996_DAC2R_MUTE_WIDTH 1 /* DAC2R_MUTE */
+#define WM8996_DAC2_VU 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_MASK 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_SHIFT 8 /* DAC2_VU */
+#define WM8996_DAC2_VU_WIDTH 1 /* DAC2_VU */
+#define WM8996_DAC2R_VOL_MASK 0x00FF /* DAC2R_VOL - [7:0] */
+#define WM8996_DAC2R_VOL_SHIFT 0 /* DAC2R_VOL - [7:0] */
+#define WM8996_DAC2R_VOL_WIDTH 8 /* DAC2R_VOL - [7:0] */
+
+/*
+ * R28 (0x1C) - Output1 Left Volume
+ */
+#define WM8996_DAC1_VU 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_MASK 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_SHIFT 8 /* DAC1_VU */
+#define WM8996_DAC1_VU_WIDTH 1 /* DAC1_VU */
+#define WM8996_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */
+#define WM8996_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */
+#define WM8996_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */
+#define WM8996_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */
+#define WM8996_HPOUT1L_VOL_MASK 0x000F /* HPOUT1L_VOL - [3:0] */
+#define WM8996_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [3:0] */
+#define WM8996_HPOUT1L_VOL_WIDTH 4 /* HPOUT1L_VOL - [3:0] */
+
+/*
+ * R29 (0x1D) - Output1 Right Volume
+ */
+#define WM8996_DAC1_VU 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_MASK 0x0100 /* DAC1_VU */
+#define WM8996_DAC1_VU_SHIFT 8 /* DAC1_VU */
+#define WM8996_DAC1_VU_WIDTH 1 /* DAC1_VU */
+#define WM8996_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */
+#define WM8996_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */
+#define WM8996_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */
+#define WM8996_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */
+#define WM8996_HPOUT1R_VOL_MASK 0x000F /* HPOUT1R_VOL - [3:0] */
+#define WM8996_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [3:0] */
+#define WM8996_HPOUT1R_VOL_WIDTH 4 /* HPOUT1R_VOL - [3:0] */
+
+/*
+ * R30 (0x1E) - Output2 Left Volume
+ */
+#define WM8996_DAC2_VU 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_MASK 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_SHIFT 8 /* DAC2_VU */
+#define WM8996_DAC2_VU_WIDTH 1 /* DAC2_VU */
+#define WM8996_HPOUT2L_ZC 0x0080 /* HPOUT2L_ZC */
+#define WM8996_HPOUT2L_ZC_MASK 0x0080 /* HPOUT2L_ZC */
+#define WM8996_HPOUT2L_ZC_SHIFT 7 /* HPOUT2L_ZC */
+#define WM8996_HPOUT2L_ZC_WIDTH 1 /* HPOUT2L_ZC */
+#define WM8996_HPOUT2L_VOL_MASK 0x000F /* HPOUT2L_VOL - [3:0] */
+#define WM8996_HPOUT2L_VOL_SHIFT 0 /* HPOUT2L_VOL - [3:0] */
+#define WM8996_HPOUT2L_VOL_WIDTH 4 /* HPOUT2L_VOL - [3:0] */
+
+/*
+ * R31 (0x1F) - Output2 Right Volume
+ */
+#define WM8996_DAC2_VU 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_MASK 0x0100 /* DAC2_VU */
+#define WM8996_DAC2_VU_SHIFT 8 /* DAC2_VU */
+#define WM8996_DAC2_VU_WIDTH 1 /* DAC2_VU */
+#define WM8996_HPOUT2R_ZC 0x0080 /* HPOUT2R_ZC */
+#define WM8996_HPOUT2R_ZC_MASK 0x0080 /* HPOUT2R_ZC */
+#define WM8996_HPOUT2R_ZC_SHIFT 7 /* HPOUT2R_ZC */
+#define WM8996_HPOUT2R_ZC_WIDTH 1 /* HPOUT2R_ZC */
+#define WM8996_HPOUT2R_VOL_MASK 0x000F /* HPOUT2R_VOL - [3:0] */
+#define WM8996_HPOUT2R_VOL_SHIFT 0 /* HPOUT2R_VOL - [3:0] */
+#define WM8996_HPOUT2R_VOL_WIDTH 4 /* HPOUT2R_VOL - [3:0] */
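
Note that DAC1_VU is defined identically in both DAC1 volume registers. On Wolfson codecs of this generation a staged volume write typically only takes effect once a write with the VU bit set latches both channels; a sketch under that assumption, with the register index macros and the wm8996_write() accessor hypothesized as before:

	/* Stage the left volume, then latch both channels with VU */
	wm8996_write(codec, WM8996_DAC1_LEFT_VOLUME,
		     (left << WM8996_DAC1L_VOL_SHIFT) & WM8996_DAC1L_VOL_MASK);
	wm8996_write(codec, WM8996_DAC1_RIGHT_VOLUME,
		     WM8996_DAC1_VU |
		     ((right << WM8996_DAC1R_VOL_SHIFT) & WM8996_DAC1R_VOL_MASK));
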
+
+/*
+ * R32 (0x20) - MICBIAS (1)
+ */
+#define WM8996_MICB1_RATE 0x0020 /* MICB1_RATE */
+#define WM8996_MICB1_RATE_MASK 0x0020 /* MICB1_RATE */
+#define WM8996_MICB1_RATE_SHIFT 5 /* MICB1_RATE */
+#define WM8996_MICB1_RATE_WIDTH 1 /* MICB1_RATE */
+#define WM8996_MICB1_MODE 0x0010 /* MICB1_MODE */
+#define WM8996_MICB1_MODE_MASK 0x0010 /* MICB1_MODE */
+#define WM8996_MICB1_MODE_SHIFT 4 /* MICB1_MODE */
+#define WM8996_MICB1_MODE_WIDTH 1 /* MICB1_MODE */
+#define WM8996_MICB1_LVL_MASK 0x000E /* MICB1_LVL - [3:1] */
+#define WM8996_MICB1_LVL_SHIFT 1 /* MICB1_LVL - [3:1] */
+#define WM8996_MICB1_LVL_WIDTH 3 /* MICB1_LVL - [3:1] */
+#define WM8996_MICB1_DISCH 0x0001 /* MICB1_DISCH */
+#define WM8996_MICB1_DISCH_MASK 0x0001 /* MICB1_DISCH */
+#define WM8996_MICB1_DISCH_SHIFT 0 /* MICB1_DISCH */
+#define WM8996_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */
+
+/*
+ * R33 (0x21) - MICBIAS (2)
+ */
+#define WM8996_MICB2_RATE 0x0020 /* MICB2_RATE */
+#define WM8996_MICB2_RATE_MASK 0x0020 /* MICB2_RATE */
+#define WM8996_MICB2_RATE_SHIFT 5 /* MICB2_RATE */
+#define WM8996_MICB2_RATE_WIDTH 1 /* MICB2_RATE */
+#define WM8996_MICB2_MODE 0x0010 /* MICB2_MODE */
+#define WM8996_MICB2_MODE_MASK 0x0010 /* MICB2_MODE */
+#define WM8996_MICB2_MODE_SHIFT 4 /* MICB2_MODE */
+#define WM8996_MICB2_MODE_WIDTH 1 /* MICB2_MODE */
+#define WM8996_MICB2_LVL_MASK 0x000E /* MICB2_LVL - [3:1] */
+#define WM8996_MICB2_LVL_SHIFT 1 /* MICB2_LVL - [3:1] */
+#define WM8996_MICB2_LVL_WIDTH 3 /* MICB2_LVL - [3:1] */
+#define WM8996_MICB2_DISCH 0x0001 /* MICB2_DISCH */
+#define WM8996_MICB2_DISCH_MASK 0x0001 /* MICB2_DISCH */
+#define WM8996_MICB2_DISCH_SHIFT 0 /* MICB2_DISCH */
+#define WM8996_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
+
+/*
+ * R40 (0x28) - LDO 1
+ */
+#define WM8996_LDO1_MODE 0x0020 /* LDO1_MODE */
+#define WM8996_LDO1_MODE_MASK 0x0020 /* LDO1_MODE */
+#define WM8996_LDO1_MODE_SHIFT 5 /* LDO1_MODE */
+#define WM8996_LDO1_MODE_WIDTH 1 /* LDO1_MODE */
+#define WM8996_LDO1_VSEL_MASK 0x0006 /* LDO1_VSEL - [2:1] */
+#define WM8996_LDO1_VSEL_SHIFT 1 /* LDO1_VSEL - [2:1] */
+#define WM8996_LDO1_VSEL_WIDTH 2 /* LDO1_VSEL - [2:1] */
+#define WM8996_LDO1_DISCH 0x0001 /* LDO1_DISCH */
+#define WM8996_LDO1_DISCH_MASK 0x0001 /* LDO1_DISCH */
+#define WM8996_LDO1_DISCH_SHIFT 0 /* LDO1_DISCH */
+#define WM8996_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */
+
+/*
+ * R41 (0x29) - LDO 2
+ */
+#define WM8996_LDO2_MODE 0x0020 /* LDO2_MODE */
+#define WM8996_LDO2_MODE_MASK 0x0020 /* LDO2_MODE */
+#define WM8996_LDO2_MODE_SHIFT 5 /* LDO2_MODE */
+#define WM8996_LDO2_MODE_WIDTH 1 /* LDO2_MODE */
+#define WM8996_LDO2_VSEL_MASK 0x001E /* LDO2_VSEL - [4:1] */
+#define WM8996_LDO2_VSEL_SHIFT 1 /* LDO2_VSEL - [4:1] */
+#define WM8996_LDO2_VSEL_WIDTH 4 /* LDO2_VSEL - [4:1] */
+#define WM8996_LDO2_DISCH 0x0001 /* LDO2_DISCH */
+#define WM8996_LDO2_DISCH_MASK 0x0001 /* LDO2_DISCH */
+#define WM8996_LDO2_DISCH_SHIFT 0 /* LDO2_DISCH */
+#define WM8996_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */
+
+/*
+ * R48 (0x30) - Accessory Detect Mode 1
+ */
+#define WM8996_JD_MODE_MASK 0x0003 /* JD_MODE - [1:0] */
+#define WM8996_JD_MODE_SHIFT 0 /* JD_MODE - [1:0] */
+#define WM8996_JD_MODE_WIDTH 2 /* JD_MODE - [1:0] */
+
+/*
+ * R49 (0x31) - Accessory Detect Mode 2
+ */
+#define WM8996_HPOUT1FB_SRC 0x0004 /* HPOUT1FB_SRC */
+#define WM8996_HPOUT1FB_SRC_MASK 0x0004 /* HPOUT1FB_SRC */
+#define WM8996_HPOUT1FB_SRC_SHIFT 2 /* HPOUT1FB_SRC */
+#define WM8996_HPOUT1FB_SRC_WIDTH 1 /* HPOUT1FB_SRC */
+#define WM8996_MICD_SRC 0x0002 /* MICD_SRC */
+#define WM8996_MICD_SRC_MASK 0x0002 /* MICD_SRC */
+#define WM8996_MICD_SRC_SHIFT 1 /* MICD_SRC */
+#define WM8996_MICD_SRC_WIDTH 1 /* MICD_SRC */
+#define WM8996_MICD_BIAS_SRC 0x0001 /* MICD_BIAS_SRC */
+#define WM8996_MICD_BIAS_SRC_MASK 0x0001 /* MICD_BIAS_SRC */
+#define WM8996_MICD_BIAS_SRC_SHIFT 0 /* MICD_BIAS_SRC */
+#define WM8996_MICD_BIAS_SRC_WIDTH 1 /* MICD_BIAS_SRC */
+
+/*
+ * R52 (0x34) - Headphone Detect 1
+ */
+#define WM8996_HP_HOLDTIME_MASK 0x00E0 /* HP_HOLDTIME - [7:5] */
+#define WM8996_HP_HOLDTIME_SHIFT 5 /* HP_HOLDTIME - [7:5] */
+#define WM8996_HP_HOLDTIME_WIDTH 3 /* HP_HOLDTIME - [7:5] */
+#define WM8996_HP_CLK_DIV_MASK 0x0018 /* HP_CLK_DIV - [4:3] */
+#define WM8996_HP_CLK_DIV_SHIFT 3 /* HP_CLK_DIV - [4:3] */
+#define WM8996_HP_CLK_DIV_WIDTH 2 /* HP_CLK_DIV - [4:3] */
+#define WM8996_HP_STEP_SIZE 0x0002 /* HP_STEP_SIZE */
+#define WM8996_HP_STEP_SIZE_MASK 0x0002 /* HP_STEP_SIZE */
+#define WM8996_HP_STEP_SIZE_SHIFT 1 /* HP_STEP_SIZE */
+#define WM8996_HP_STEP_SIZE_WIDTH 1 /* HP_STEP_SIZE */
+#define WM8996_HP_POLL 0x0001 /* HP_POLL */
+#define WM8996_HP_POLL_MASK 0x0001 /* HP_POLL */
+#define WM8996_HP_POLL_SHIFT 0 /* HP_POLL */
+#define WM8996_HP_POLL_WIDTH 1 /* HP_POLL */
+
+/*
+ * R53 (0x35) - Headphone Detect 2
+ */
+#define WM8996_HP_DONE 0x0080 /* HP_DONE */
+#define WM8996_HP_DONE_MASK 0x0080 /* HP_DONE */
+#define WM8996_HP_DONE_SHIFT 7 /* HP_DONE */
+#define WM8996_HP_DONE_WIDTH 1 /* HP_DONE */
+#define WM8996_HP_LVL_MASK 0x007F /* HP_LVL - [6:0] */
+#define WM8996_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */
+#define WM8996_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */
+
+/*
+ * R56 (0x38) - Mic Detect 1
+ */
+#define WM8996_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8996_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8996_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8996_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */
+#define WM8996_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */
+#define WM8996_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */
+#define WM8996_MICD_DBTIME 0x0002 /* MICD_DBTIME */
+#define WM8996_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */
+#define WM8996_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */
+#define WM8996_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */
+#define WM8996_MICD_ENA 0x0001 /* MICD_ENA */
+#define WM8996_MICD_ENA_MASK 0x0001 /* MICD_ENA */
+#define WM8996_MICD_ENA_SHIFT 0 /* MICD_ENA */
+#define WM8996_MICD_ENA_WIDTH 1 /* MICD_ENA */
+
+/*
+ * R57 (0x39) - Mic Detect 2
+ */
+#define WM8996_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */
+#define WM8996_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */
+#define WM8996_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R58 (0x3A) - Mic Detect 3
+ */
+#define WM8996_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
+#define WM8996_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
+#define WM8996_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
+#define WM8996_MICD_VALID 0x0002 /* MICD_VALID */
+#define WM8996_MICD_VALID_MASK 0x0002 /* MICD_VALID */
+#define WM8996_MICD_VALID_SHIFT 1 /* MICD_VALID */
+#define WM8996_MICD_VALID_WIDTH 1 /* MICD_VALID */
+#define WM8996_MICD_STS 0x0001 /* MICD_STS */
+#define WM8996_MICD_STS_MASK 0x0001 /* MICD_STS */
+#define WM8996_MICD_STS_SHIFT 0 /* MICD_STS */
+#define WM8996_MICD_STS_WIDTH 1 /* MICD_STS */
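
The Mic Detect 3 fields combine a status flag, a validity flag and a measured level. A sketch of decoding one reading, with the WM8996_MIC_DETECT_3 register index and the accessors assumed as above:

	unsigned int reg = wm8996_read(codec, WM8996_MIC_DETECT_3);

	if ((reg & WM8996_MICD_VALID) && (reg & WM8996_MICD_STS)) {
		/* lvl reports which impedance threshold(s) matched */
		unsigned int lvl = (reg & WM8996_MICD_LVL_MASK)
				   >> WM8996_MICD_LVL_SHIFT;
		/* ...report accessory type based on lvl... */
	}
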
+
+/*
+ * R64 (0x40) - Charge Pump (1)
+ */
+#define WM8996_CP_ENA 0x8000 /* CP_ENA */
+#define WM8996_CP_ENA_MASK 0x8000 /* CP_ENA */
+#define WM8996_CP_ENA_SHIFT 15 /* CP_ENA */
+#define WM8996_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R65 (0x41) - Charge Pump (2)
+ */
+#define WM8996_CP_DISCH 0x8000 /* CP_DISCH */
+#define WM8996_CP_DISCH_MASK 0x8000 /* CP_DISCH */
+#define WM8996_CP_DISCH_SHIFT 15 /* CP_DISCH */
+#define WM8996_CP_DISCH_WIDTH 1 /* CP_DISCH */
+
+/*
+ * R80 (0x50) - DC Servo (1)
+ */
+#define WM8996_DCS_ENA_CHAN_3 0x0008 /* DCS_ENA_CHAN_3 */
+#define WM8996_DCS_ENA_CHAN_3_MASK 0x0008 /* DCS_ENA_CHAN_3 */
+#define WM8996_DCS_ENA_CHAN_3_SHIFT 3 /* DCS_ENA_CHAN_3 */
+#define WM8996_DCS_ENA_CHAN_3_WIDTH 1 /* DCS_ENA_CHAN_3 */
+#define WM8996_DCS_ENA_CHAN_2 0x0004 /* DCS_ENA_CHAN_2 */
+#define WM8996_DCS_ENA_CHAN_2_MASK 0x0004 /* DCS_ENA_CHAN_2 */
+#define WM8996_DCS_ENA_CHAN_2_SHIFT 2 /* DCS_ENA_CHAN_2 */
+#define WM8996_DCS_ENA_CHAN_2_WIDTH 1 /* DCS_ENA_CHAN_2 */
+#define WM8996_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8996_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8996_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
+#define WM8996_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
+#define WM8996_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8996_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8996_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
+#define WM8996_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
+
+/*
+ * R81 (0x51) - DC Servo (2)
+ */
+#define WM8996_DCS_TRIG_SINGLE_3 0x8000 /* DCS_TRIG_SINGLE_3 */
+#define WM8996_DCS_TRIG_SINGLE_3_MASK 0x8000 /* DCS_TRIG_SINGLE_3 */
+#define WM8996_DCS_TRIG_SINGLE_3_SHIFT 15 /* DCS_TRIG_SINGLE_3 */
+#define WM8996_DCS_TRIG_SINGLE_3_WIDTH 1 /* DCS_TRIG_SINGLE_3 */
+#define WM8996_DCS_TRIG_SINGLE_2 0x4000 /* DCS_TRIG_SINGLE_2 */
+#define WM8996_DCS_TRIG_SINGLE_2_MASK 0x4000 /* DCS_TRIG_SINGLE_2 */
+#define WM8996_DCS_TRIG_SINGLE_2_SHIFT 14 /* DCS_TRIG_SINGLE_2 */
+#define WM8996_DCS_TRIG_SINGLE_2_WIDTH 1 /* DCS_TRIG_SINGLE_2 */
+#define WM8996_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8996_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8996_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
+#define WM8996_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
+#define WM8996_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8996_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8996_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
+#define WM8996_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
+#define WM8996_DCS_TRIG_SERIES_3 0x0800 /* DCS_TRIG_SERIES_3 */
+#define WM8996_DCS_TRIG_SERIES_3_MASK 0x0800 /* DCS_TRIG_SERIES_3 */
+#define WM8996_DCS_TRIG_SERIES_3_SHIFT 11 /* DCS_TRIG_SERIES_3 */
+#define WM8996_DCS_TRIG_SERIES_3_WIDTH 1 /* DCS_TRIG_SERIES_3 */
+#define WM8996_DCS_TRIG_SERIES_2 0x0400 /* DCS_TRIG_SERIES_2 */
+#define WM8996_DCS_TRIG_SERIES_2_MASK 0x0400 /* DCS_TRIG_SERIES_2 */
+#define WM8996_DCS_TRIG_SERIES_2_SHIFT 10 /* DCS_TRIG_SERIES_2 */
+#define WM8996_DCS_TRIG_SERIES_2_WIDTH 1 /* DCS_TRIG_SERIES_2 */
+#define WM8996_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8996_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8996_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
+#define WM8996_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
+#define WM8996_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8996_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8996_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
+#define WM8996_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
+#define WM8996_DCS_TRIG_STARTUP_3 0x0080 /* DCS_TRIG_STARTUP_3 */
+#define WM8996_DCS_TRIG_STARTUP_3_MASK 0x0080 /* DCS_TRIG_STARTUP_3 */
+#define WM8996_DCS_TRIG_STARTUP_3_SHIFT 7 /* DCS_TRIG_STARTUP_3 */
+#define WM8996_DCS_TRIG_STARTUP_3_WIDTH 1 /* DCS_TRIG_STARTUP_3 */
+#define WM8996_DCS_TRIG_STARTUP_2 0x0040 /* DCS_TRIG_STARTUP_2 */
+#define WM8996_DCS_TRIG_STARTUP_2_MASK 0x0040 /* DCS_TRIG_STARTUP_2 */
+#define WM8996_DCS_TRIG_STARTUP_2_SHIFT 6 /* DCS_TRIG_STARTUP_2 */
+#define WM8996_DCS_TRIG_STARTUP_2_WIDTH 1 /* DCS_TRIG_STARTUP_2 */
+#define WM8996_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8996_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8996_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
+#define WM8996_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
+#define WM8996_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8996_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8996_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
+#define WM8996_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
+#define WM8996_DCS_TRIG_DAC_WR_3 0x0008 /* DCS_TRIG_DAC_WR_3 */
+#define WM8996_DCS_TRIG_DAC_WR_3_MASK 0x0008 /* DCS_TRIG_DAC_WR_3 */
+#define WM8996_DCS_TRIG_DAC_WR_3_SHIFT 3 /* DCS_TRIG_DAC_WR_3 */
+#define WM8996_DCS_TRIG_DAC_WR_3_WIDTH 1 /* DCS_TRIG_DAC_WR_3 */
+#define WM8996_DCS_TRIG_DAC_WR_2 0x0004 /* DCS_TRIG_DAC_WR_2 */
+#define WM8996_DCS_TRIG_DAC_WR_2_MASK 0x0004 /* DCS_TRIG_DAC_WR_2 */
+#define WM8996_DCS_TRIG_DAC_WR_2_SHIFT 2 /* DCS_TRIG_DAC_WR_2 */
+#define WM8996_DCS_TRIG_DAC_WR_2_WIDTH 1 /* DCS_TRIG_DAC_WR_2 */
+#define WM8996_DCS_TRIG_DAC_WR_1 0x0002 /* DCS_TRIG_DAC_WR_1 */
+#define WM8996_DCS_TRIG_DAC_WR_1_MASK 0x0002 /* DCS_TRIG_DAC_WR_1 */
+#define WM8996_DCS_TRIG_DAC_WR_1_SHIFT 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM8996_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM8996_DCS_TRIG_DAC_WR_0 0x0001 /* DCS_TRIG_DAC_WR_0 */
+#define WM8996_DCS_TRIG_DAC_WR_0_MASK 0x0001 /* DCS_TRIG_DAC_WR_0 */
+#define WM8996_DCS_TRIG_DAC_WR_0_SHIFT 0 /* DCS_TRIG_DAC_WR_0 */
+#define WM8996_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
+
+/*
+ * R82 (0x52) - DC Servo (3)
+ */
+#define WM8996_DCS_TIMER_PERIOD_23_MASK 0x0F00 /* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8996_DCS_TIMER_PERIOD_23_SHIFT 8 /* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8996_DCS_TIMER_PERIOD_23_WIDTH 4 /* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8996_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8996_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8996_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R84 (0x54) - DC Servo (5)
+ */
+#define WM8996_DCS_SERIES_NO_23_MASK 0x7F00 /* DCS_SERIES_NO_23 - [14:8] */
+#define WM8996_DCS_SERIES_NO_23_SHIFT 8 /* DCS_SERIES_NO_23 - [14:8] */
+#define WM8996_DCS_SERIES_NO_23_WIDTH 7 /* DCS_SERIES_NO_23 - [14:8] */
+#define WM8996_DCS_SERIES_NO_01_MASK 0x007F /* DCS_SERIES_NO_01 - [6:0] */
+#define WM8996_DCS_SERIES_NO_01_SHIFT 0 /* DCS_SERIES_NO_01 - [6:0] */
+#define WM8996_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [6:0] */
+
+/*
+ * R85 (0x55) - DC Servo (6)
+ */
+#define WM8996_DCS_DAC_WR_VAL_3_MASK 0xFF00 /* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8996_DCS_DAC_WR_VAL_3_SHIFT 8 /* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8996_DCS_DAC_WR_VAL_3_WIDTH 8 /* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8996_DCS_DAC_WR_VAL_2_MASK 0x00FF /* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8996_DCS_DAC_WR_VAL_2_SHIFT 0 /* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8996_DCS_DAC_WR_VAL_2_WIDTH 8 /* DCS_DAC_WR_VAL_2 - [7:0] */
+
+/*
+ * R86 (0x56) - DC Servo (7)
+ */
+#define WM8996_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8996_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8996_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8996_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8996_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8996_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R87 (0x57) - DC Servo Readback 0
+ */
+#define WM8996_DCS_CAL_COMPLETE_MASK 0x0F00 /* DCS_CAL_COMPLETE - [11:8] */
+#define WM8996_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [11:8] */
+#define WM8996_DCS_CAL_COMPLETE_WIDTH 4 /* DCS_CAL_COMPLETE - [11:8] */
+#define WM8996_DCS_DAC_WR_COMPLETE_MASK 0x00F0 /* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8996_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8996_DCS_DAC_WR_COMPLETE_WIDTH 4 /* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8996_DCS_STARTUP_COMPLETE_MASK 0x000F /* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8996_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8996_DCS_STARTUP_COMPLETE_WIDTH 4 /* DCS_STARTUP_COMPLETE - [3:0] */
+
+/*
+ * R96 (0x60) - Analogue HP (1)
+ */
+#define WM8996_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM8996_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM8996_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */
+#define WM8996_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */
+#define WM8996_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */
+#define WM8996_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */
+#define WM8996_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */
+#define WM8996_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */
+#define WM8996_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */
+#define WM8996_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */
+#define WM8996_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */
+#define WM8996_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */
+#define WM8996_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM8996_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM8996_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */
+#define WM8996_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */
+#define WM8996_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */
+#define WM8996_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */
+#define WM8996_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */
+#define WM8996_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */
+#define WM8996_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */
+#define WM8996_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */
+#define WM8996_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */
+#define WM8996_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */
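
Taken together, the DC servo triggers and the staged DLY/OUTP/RMV_SHORT bits suggest the usual Wolfson headphone bring-up: calibrate the output offsets, then unmask the amplifier in stages. A sketch of that sequence, inferred from the bit names rather than taken from the actual driver, with register index macros and accessors assumed as before:

	/* Enable and start offset calibration on the HPOUT1 servo channels */
	wm8996_write(codec, WM8996_DC_SERVO_1,
		     WM8996_DCS_ENA_CHAN_0 | WM8996_DCS_ENA_CHAN_1);
	wm8996_write(codec, WM8996_DC_SERVO_2,
		     WM8996_DCS_TRIG_STARTUP_0 | WM8996_DCS_TRIG_STARTUP_1);
	/* ...wait for DCS_STARTUP_COMPLETE in DC Servo Readback 0... */

	/* Step the outputs through delay, output and short-removal stages */
	wm8996_write(codec, WM8996_ANALOGUE_HP_1,
		     WM8996_HPOUT1L_DLY | WM8996_HPOUT1R_DLY |
		     WM8996_HPOUT1L_OUTP | WM8996_HPOUT1R_OUTP |
		     WM8996_HPOUT1L_RMV_SHORT | WM8996_HPOUT1R_RMV_SHORT);
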
+
+/*
+ * R97 (0x61) - Analogue HP (2)
+ */
+#define WM8996_HPOUT2L_RMV_SHORT 0x0080 /* HPOUT2L_RMV_SHORT */
+#define WM8996_HPOUT2L_RMV_SHORT_MASK 0x0080 /* HPOUT2L_RMV_SHORT */
+#define WM8996_HPOUT2L_RMV_SHORT_SHIFT 7 /* HPOUT2L_RMV_SHORT */
+#define WM8996_HPOUT2L_RMV_SHORT_WIDTH 1 /* HPOUT2L_RMV_SHORT */
+#define WM8996_HPOUT2L_OUTP 0x0040 /* HPOUT2L_OUTP */
+#define WM8996_HPOUT2L_OUTP_MASK 0x0040 /* HPOUT2L_OUTP */
+#define WM8996_HPOUT2L_OUTP_SHIFT 6 /* HPOUT2L_OUTP */
+#define WM8996_HPOUT2L_OUTP_WIDTH 1 /* HPOUT2L_OUTP */
+#define WM8996_HPOUT2L_DLY 0x0020 /* HPOUT2L_DLY */
+#define WM8996_HPOUT2L_DLY_MASK 0x0020 /* HPOUT2L_DLY */
+#define WM8996_HPOUT2L_DLY_SHIFT 5 /* HPOUT2L_DLY */
+#define WM8996_HPOUT2L_DLY_WIDTH 1 /* HPOUT2L_DLY */
+#define WM8996_HPOUT2R_RMV_SHORT 0x0008 /* HPOUT2R_RMV_SHORT */
+#define WM8996_HPOUT2R_RMV_SHORT_MASK 0x0008 /* HPOUT2R_RMV_SHORT */
+#define WM8996_HPOUT2R_RMV_SHORT_SHIFT 3 /* HPOUT2R_RMV_SHORT */
+#define WM8996_HPOUT2R_RMV_SHORT_WIDTH 1 /* HPOUT2R_RMV_SHORT */
+#define WM8996_HPOUT2R_OUTP 0x0004 /* HPOUT2R_OUTP */
+#define WM8996_HPOUT2R_OUTP_MASK 0x0004 /* HPOUT2R_OUTP */
+#define WM8996_HPOUT2R_OUTP_SHIFT 2 /* HPOUT2R_OUTP */
+#define WM8996_HPOUT2R_OUTP_WIDTH 1 /* HPOUT2R_OUTP */
+#define WM8996_HPOUT2R_DLY 0x0002 /* HPOUT2R_DLY */
+#define WM8996_HPOUT2R_DLY_MASK 0x0002 /* HPOUT2R_DLY */
+#define WM8996_HPOUT2R_DLY_SHIFT 1 /* HPOUT2R_DLY */
+#define WM8996_HPOUT2R_DLY_WIDTH 1 /* HPOUT2R_DLY */
+
+/*
+ * R256 (0x100) - Chip Revision
+ */
+#define WM8996_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */
+#define WM8996_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */
+#define WM8996_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */
+
+/*
+ * R257 (0x101) - Control Interface (1)
+ */
+#define WM8996_AUTO_INC 0x0004 /* AUTO_INC */
+#define WM8996_AUTO_INC_MASK 0x0004 /* AUTO_INC */
+#define WM8996_AUTO_INC_SHIFT 2 /* AUTO_INC */
+#define WM8996_AUTO_INC_WIDTH 1 /* AUTO_INC */
+
+/*
+ * R272 (0x110) - Write Sequencer Ctrl (1)
+ */
+#define WM8996_WSEQ_ENA 0x8000 /* WSEQ_ENA */
+#define WM8996_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */
+#define WM8996_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */
+#define WM8996_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM8996_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
+#define WM8996_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
+#define WM8996_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
+#define WM8996_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM8996_WSEQ_START 0x0100 /* WSEQ_START */
+#define WM8996_WSEQ_START_MASK 0x0100 /* WSEQ_START */
+#define WM8996_WSEQ_START_SHIFT 8 /* WSEQ_START */
+#define WM8996_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM8996_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */
+#define WM8996_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */
+#define WM8996_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */
+
+/*
+ * R273 (0x111) - Write Sequencer Ctrl (2)
+ */
+#define WM8996_WSEQ_BUSY 0x0100 /* WSEQ_BUSY */
+#define WM8996_WSEQ_BUSY_MASK 0x0100 /* WSEQ_BUSY */
+#define WM8996_WSEQ_BUSY_SHIFT 8 /* WSEQ_BUSY */
+#define WM8996_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+#define WM8996_WSEQ_CURRENT_INDEX_MASK 0x007F /* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8996_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8996_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [6:0] */
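
The write sequencer control fields pair an enable plus start trigger and index with a busy flag to poll. A sketch of running a stored sequence; the register index macros are assumed as before and msleep() is the standard helper from linux/delay.h:

	/* Kick off the stored sequence starting at 'index' */
	wm8996_write(codec, WM8996_WRITE_SEQUENCER_CTRL_1,
		     WM8996_WSEQ_ENA | WM8996_WSEQ_START |
		     (index & WM8996_WSEQ_START_INDEX_MASK));

	/* Wait for the sequencer to run to completion */
	while (wm8996_read(codec, WM8996_WRITE_SEQUENCER_CTRL_2) &
	       WM8996_WSEQ_BUSY)
		msleep(1);
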
+
+/*
+ * R512 (0x200) - AIF Clocking (1)
+ */
+#define WM8996_SYSCLK_SRC_MASK 0x0018 /* SYSCLK_SRC - [4:3] */
+#define WM8996_SYSCLK_SRC_SHIFT 3 /* SYSCLK_SRC - [4:3] */
+#define WM8996_SYSCLK_SRC_WIDTH 2 /* SYSCLK_SRC - [4:3] */
+#define WM8996_SYSCLK_INV 0x0004 /* SYSCLK_INV */
+#define WM8996_SYSCLK_INV_MASK 0x0004 /* SYSCLK_INV */
+#define WM8996_SYSCLK_INV_SHIFT 2 /* SYSCLK_INV */
+#define WM8996_SYSCLK_INV_WIDTH 1 /* SYSCLK_INV */
+#define WM8996_SYSCLK_DIV 0x0002 /* SYSCLK_DIV */
+#define WM8996_SYSCLK_DIV_MASK 0x0002 /* SYSCLK_DIV */
+#define WM8996_SYSCLK_DIV_SHIFT 1 /* SYSCLK_DIV */
+#define WM8996_SYSCLK_DIV_WIDTH 1 /* SYSCLK_DIV */
+#define WM8996_SYSCLK_ENA 0x0001 /* SYSCLK_ENA */
+#define WM8996_SYSCLK_ENA_MASK 0x0001 /* SYSCLK_ENA */
+#define WM8996_SYSCLK_ENA_SHIFT 0 /* SYSCLK_ENA */
+#define WM8996_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */
+
+/*
+ * R513 (0x201) - AIF Clocking (2)
+ */
+#define WM8996_DSP2_DIV_MASK 0x0018 /* DSP2_DIV - [4:3] */
+#define WM8996_DSP2_DIV_SHIFT 3 /* DSP2_DIV - [4:3] */
+#define WM8996_DSP2_DIV_WIDTH 2 /* DSP2_DIV - [4:3] */
+#define WM8996_DSP1_DIV_MASK 0x0003 /* DSP1_DIV - [1:0] */
+#define WM8996_DSP1_DIV_SHIFT 0 /* DSP1_DIV - [1:0] */
+#define WM8996_DSP1_DIV_WIDTH 2 /* DSP1_DIV - [1:0] */
+
+/*
+ * R520 (0x208) - Clocking (1)
+ */
+#define WM8996_LFCLK_ENA 0x0020 /* LFCLK_ENA */
+#define WM8996_LFCLK_ENA_MASK 0x0020 /* LFCLK_ENA */
+#define WM8996_LFCLK_ENA_SHIFT 5 /* LFCLK_ENA */
+#define WM8996_LFCLK_ENA_WIDTH 1 /* LFCLK_ENA */
+#define WM8996_TOCLK_ENA 0x0010 /* TOCLK_ENA */
+#define WM8996_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */
+#define WM8996_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */
+#define WM8996_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
+#define WM8996_AIFCLK_ENA 0x0004 /* AIFCLK_ENA */
+#define WM8996_AIFCLK_ENA_MASK 0x0004 /* AIFCLK_ENA */
+#define WM8996_AIFCLK_ENA_SHIFT 2 /* AIFCLK_ENA */
+#define WM8996_AIFCLK_ENA_WIDTH 1 /* AIFCLK_ENA */
+#define WM8996_SYSDSPCLK_ENA 0x0002 /* SYSDSPCLK_ENA */
+#define WM8996_SYSDSPCLK_ENA_MASK 0x0002 /* SYSDSPCLK_ENA */
+#define WM8996_SYSDSPCLK_ENA_SHIFT 1 /* SYSDSPCLK_ENA */
+#define WM8996_SYSDSPCLK_ENA_WIDTH 1 /* SYSDSPCLK_ENA */
+
+/*
+ * R521 (0x209) - Clocking (2)
+ */
+#define WM8996_TOCLK_DIV_MASK 0x0700 /* TOCLK_DIV - [10:8] */
+#define WM8996_TOCLK_DIV_SHIFT 8 /* TOCLK_DIV - [10:8] */
+#define WM8996_TOCLK_DIV_WIDTH 3 /* TOCLK_DIV - [10:8] */
+#define WM8996_DBCLK_DIV_MASK 0x00F0 /* DBCLK_DIV - [7:4] */
+#define WM8996_DBCLK_DIV_SHIFT 4 /* DBCLK_DIV - [7:4] */
+#define WM8996_DBCLK_DIV_WIDTH 4 /* DBCLK_DIV - [7:4] */
+#define WM8996_OPCLK_DIV_MASK 0x0007 /* OPCLK_DIV - [2:0] */
+#define WM8996_OPCLK_DIV_SHIFT 0 /* OPCLK_DIV - [2:0] */
+#define WM8996_OPCLK_DIV_WIDTH 3 /* OPCLK_DIV - [2:0] */
+
+/*
+ * R528 (0x210) - AIF Rate
+ */
+#define WM8996_SYSCLK_RATE 0x0001 /* SYSCLK_RATE */
+#define WM8996_SYSCLK_RATE_MASK 0x0001 /* SYSCLK_RATE */
+#define WM8996_SYSCLK_RATE_SHIFT 0 /* SYSCLK_RATE */
+#define WM8996_SYSCLK_RATE_WIDTH 1 /* SYSCLK_RATE */
+
+/*
+ * R544 (0x220) - FLL Control (1)
+ */
+#define WM8996_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
+#define WM8996_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
+#define WM8996_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
+#define WM8996_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
+#define WM8996_FLL_ENA 0x0001 /* FLL_ENA */
+#define WM8996_FLL_ENA_MASK 0x0001 /* FLL_ENA */
+#define WM8996_FLL_ENA_SHIFT 0 /* FLL_ENA */
+#define WM8996_FLL_ENA_WIDTH 1 /* FLL_ENA */
+
+/*
+ * R545 (0x221) - FLL Control (2)
+ */
+#define WM8996_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */
+#define WM8996_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */
+#define WM8996_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */
+#define WM8996_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
+#define WM8996_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
+#define WM8996_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
+
+/*
+ * R546 (0x222) - FLL Control (3)
+ */
+#define WM8996_FLL_THETA_MASK 0xFFFF /* FLL_THETA - [15:0] */
+#define WM8996_FLL_THETA_SHIFT 0 /* FLL_THETA - [15:0] */
+#define WM8996_FLL_THETA_WIDTH 16 /* FLL_THETA - [15:0] */
+
+/*
+ * R547 (0x223) - FLL Control (4)
+ */
+#define WM8996_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
+#define WM8996_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
+#define WM8996_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
+#define WM8996_FLL_LOOP_GAIN_MASK 0x000F /* FLL_LOOP_GAIN - [3:0] */
+#define WM8996_FLL_LOOP_GAIN_SHIFT 0 /* FLL_LOOP_GAIN - [3:0] */
+#define WM8996_FLL_LOOP_GAIN_WIDTH 4 /* FLL_LOOP_GAIN - [3:0] */
+
+/*
+ * R548 (0x224) - FLL Control (5)
+ */
+#define WM8996_FLL_FRC_NCO_VAL_MASK 0x1F80 /* FLL_FRC_NCO_VAL - [12:7] */
+#define WM8996_FLL_FRC_NCO_VAL_SHIFT 7 /* FLL_FRC_NCO_VAL - [12:7] */
+#define WM8996_FLL_FRC_NCO_VAL_WIDTH 6 /* FLL_FRC_NCO_VAL - [12:7] */
+#define WM8996_FLL_FRC_NCO 0x0040 /* FLL_FRC_NCO */
+#define WM8996_FLL_FRC_NCO_MASK 0x0040 /* FLL_FRC_NCO */
+#define WM8996_FLL_FRC_NCO_SHIFT 6 /* FLL_FRC_NCO */
+#define WM8996_FLL_FRC_NCO_WIDTH 1 /* FLL_FRC_NCO */
+#define WM8996_FLL_REFCLK_DIV_MASK 0x0018 /* FLL_REFCLK_DIV - [4:3] */
+#define WM8996_FLL_REFCLK_DIV_SHIFT 3 /* FLL_REFCLK_DIV - [4:3] */
+#define WM8996_FLL_REFCLK_DIV_WIDTH 2 /* FLL_REFCLK_DIV - [4:3] */
+#define WM8996_FLL_REF_FREQ 0x0004 /* FLL_REF_FREQ */
+#define WM8996_FLL_REF_FREQ_MASK 0x0004 /* FLL_REF_FREQ */
+#define WM8996_FLL_REF_FREQ_SHIFT 2 /* FLL_REF_FREQ */
+#define WM8996_FLL_REF_FREQ_WIDTH 1 /* FLL_REF_FREQ */
+#define WM8996_FLL_REFCLK_SRC_MASK 0x0003 /* FLL_REFCLK_SRC - [1:0] */
+#define WM8996_FLL_REFCLK_SRC_SHIFT 0 /* FLL_REFCLK_SRC - [1:0] */
+#define WM8996_FLL_REFCLK_SRC_WIDTH 2 /* FLL_REFCLK_SRC - [1:0] */
+
+/*
+ * R549 (0x225) - FLL Control (6)
+ */
+#define WM8996_FLL_REFCLK_SRC_STS_MASK 0x000C /* FLL_REFCLK_SRC_STS - [3:2] */
+#define WM8996_FLL_REFCLK_SRC_STS_SHIFT 2 /* FLL_REFCLK_SRC_STS - [3:2] */
+#define WM8996_FLL_REFCLK_SRC_STS_WIDTH 2 /* FLL_REFCLK_SRC_STS - [3:2] */
+#define WM8996_FLL_SWITCH_CLK 0x0001 /* FLL_SWITCH_CLK */
+#define WM8996_FLL_SWITCH_CLK_MASK 0x0001 /* FLL_SWITCH_CLK */
+#define WM8996_FLL_SWITCH_CLK_SHIFT 0 /* FLL_SWITCH_CLK */
+#define WM8996_FLL_SWITCH_CLK_WIDTH 1 /* FLL_SWITCH_CLK */
+
+/*
+ * R550 (0x226) - FLL EFS 1
+ */
+#define WM8996_FLL_LAMBDA_MASK 0xFFFF /* FLL_LAMBDA - [15:0] */
+#define WM8996_FLL_LAMBDA_SHIFT 0 /* FLL_LAMBDA - [15:0] */
+#define WM8996_FLL_LAMBDA_WIDTH 16 /* FLL_LAMBDA - [15:0] */
+
+/*
+ * R551 (0x227) - FLL EFS 2
+ */
+#define WM8996_FLL_LFSR_SEL_MASK 0x0006 /* FLL_LFSR_SEL - [2:1] */
+#define WM8996_FLL_LFSR_SEL_SHIFT 1 /* FLL_LFSR_SEL - [2:1] */
+#define WM8996_FLL_LFSR_SEL_WIDTH 2 /* FLL_LFSR_SEL - [2:1] */
+#define WM8996_FLL_EFS_ENA 0x0001 /* FLL_EFS_ENA */
+#define WM8996_FLL_EFS_ENA_MASK 0x0001 /* FLL_EFS_ENA */
+#define WM8996_FLL_EFS_ENA_SHIFT 0 /* FLL_EFS_ENA */
+#define WM8996_FLL_EFS_ENA_WIDTH 1 /* FLL_EFS_ENA */
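
The FLL_N, FLL_THETA and FLL_LAMBDA fields imply a fractional multiplier of roughly Fvco = Fref * FRATIO * (N + THETA/LAMBDA), with FLL_OUTDIV dividing the VCO down to the output rate; this reading is inferred from the field names and widths and should be checked against the WM8996 datasheet. A minimal integer-only sketch of deriving N and THETA for a target VCO rate under that assumed model:

	/* Split target/fref into integer and fractional parts (assumed model) */
	unsigned int fll_n = target / fref;
	unsigned int fll_theta = (unsigned int)(((u64)(target % fref) * lambda)
						/ fref);
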
+
+/*
+ * R768 (0x300) - AIF1 Control
+ */
+#define WM8996_AIF1_TRI 0x0004 /* AIF1_TRI */
+#define WM8996_AIF1_TRI_MASK 0x0004 /* AIF1_TRI */
+#define WM8996_AIF1_TRI_SHIFT 2 /* AIF1_TRI */
+#define WM8996_AIF1_TRI_WIDTH 1 /* AIF1_TRI */
+#define WM8996_AIF1_FMT_MASK 0x0003 /* AIF1_FMT - [1:0] */
+#define WM8996_AIF1_FMT_SHIFT 0 /* AIF1_FMT - [1:0] */
+#define WM8996_AIF1_FMT_WIDTH 2 /* AIF1_FMT - [1:0] */
+
+/*
+ * R769 (0x301) - AIF1 BCLK
+ */
+#define WM8996_AIF1_BCLK_INV 0x0400 /* AIF1_BCLK_INV */
+#define WM8996_AIF1_BCLK_INV_MASK 0x0400 /* AIF1_BCLK_INV */
+#define WM8996_AIF1_BCLK_INV_SHIFT 10 /* AIF1_BCLK_INV */
+#define WM8996_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */
+#define WM8996_AIF1_BCLK_FRC 0x0200 /* AIF1_BCLK_FRC */
+#define WM8996_AIF1_BCLK_FRC_MASK 0x0200 /* AIF1_BCLK_FRC */
+#define WM8996_AIF1_BCLK_FRC_SHIFT 9 /* AIF1_BCLK_FRC */
+#define WM8996_AIF1_BCLK_FRC_WIDTH 1 /* AIF1_BCLK_FRC */
+#define WM8996_AIF1_BCLK_MSTR 0x0100 /* AIF1_BCLK_MSTR */
+#define WM8996_AIF1_BCLK_MSTR_MASK 0x0100 /* AIF1_BCLK_MSTR */
+#define WM8996_AIF1_BCLK_MSTR_SHIFT 8 /* AIF1_BCLK_MSTR */
+#define WM8996_AIF1_BCLK_MSTR_WIDTH 1 /* AIF1_BCLK_MSTR */
+#define WM8996_AIF1_BCLK_DIV_MASK 0x000F /* AIF1_BCLK_DIV - [3:0] */
+#define WM8996_AIF1_BCLK_DIV_SHIFT 0 /* AIF1_BCLK_DIV - [3:0] */
+#define WM8996_AIF1_BCLK_DIV_WIDTH 4 /* AIF1_BCLK_DIV - [3:0] */
+
+/*
+ * R770 (0x302) - AIF1 TX LRCLK(1)
+ */
+#define WM8996_AIF1TX_RATE_MASK 0x07FF /* AIF1TX_RATE - [10:0] */
+#define WM8996_AIF1TX_RATE_SHIFT 0 /* AIF1TX_RATE - [10:0] */
+#define WM8996_AIF1TX_RATE_WIDTH 11 /* AIF1TX_RATE - [10:0] */
+
+/*
+ * R771 (0x303) - AIF1 TX LRCLK(2)
+ */
+#define WM8996_AIF1TX_LRCLK_MODE 0x0008 /* AIF1TX_LRCLK_MODE */
+#define WM8996_AIF1TX_LRCLK_MODE_MASK 0x0008 /* AIF1TX_LRCLK_MODE */
+#define WM8996_AIF1TX_LRCLK_MODE_SHIFT 3 /* AIF1TX_LRCLK_MODE */
+#define WM8996_AIF1TX_LRCLK_MODE_WIDTH 1 /* AIF1TX_LRCLK_MODE */
+#define WM8996_AIF1TX_LRCLK_INV 0x0004 /* AIF1TX_LRCLK_INV */
+#define WM8996_AIF1TX_LRCLK_INV_MASK 0x0004 /* AIF1TX_LRCLK_INV */
+#define WM8996_AIF1TX_LRCLK_INV_SHIFT 2 /* AIF1TX_LRCLK_INV */
+#define WM8996_AIF1TX_LRCLK_INV_WIDTH 1 /* AIF1TX_LRCLK_INV */
+#define WM8996_AIF1TX_LRCLK_FRC 0x0002 /* AIF1TX_LRCLK_FRC */
+#define WM8996_AIF1TX_LRCLK_FRC_MASK 0x0002 /* AIF1TX_LRCLK_FRC */
+#define WM8996_AIF1TX_LRCLK_FRC_SHIFT 1 /* AIF1TX_LRCLK_FRC */
+#define WM8996_AIF1TX_LRCLK_FRC_WIDTH 1 /* AIF1TX_LRCLK_FRC */
+#define WM8996_AIF1TX_LRCLK_MSTR 0x0001 /* AIF1TX_LRCLK_MSTR */
+#define WM8996_AIF1TX_LRCLK_MSTR_MASK 0x0001 /* AIF1TX_LRCLK_MSTR */
+#define WM8996_AIF1TX_LRCLK_MSTR_SHIFT 0 /* AIF1TX_LRCLK_MSTR */
+#define WM8996_AIF1TX_LRCLK_MSTR_WIDTH 1 /* AIF1TX_LRCLK_MSTR */
+
+/*
+ * R772 (0x304) - AIF1 RX LRCLK(1)
+ */
+#define WM8996_AIF1RX_RATE_MASK 0x07FF /* AIF1RX_RATE - [10:0] */
+#define WM8996_AIF1RX_RATE_SHIFT 0 /* AIF1RX_RATE - [10:0] */
+#define WM8996_AIF1RX_RATE_WIDTH 11 /* AIF1RX_RATE - [10:0] */
+
+/*
+ * R773 (0x305) - AIF1 RX LRCLK(2)
+ */
+#define WM8996_AIF1RX_LRCLK_INV 0x0004 /* AIF1RX_LRCLK_INV */
+#define WM8996_AIF1RX_LRCLK_INV_MASK 0x0004 /* AIF1RX_LRCLK_INV */
+#define WM8996_AIF1RX_LRCLK_INV_SHIFT 2 /* AIF1RX_LRCLK_INV */
+#define WM8996_AIF1RX_LRCLK_INV_WIDTH 1 /* AIF1RX_LRCLK_INV */
+#define WM8996_AIF1RX_LRCLK_FRC 0x0002 /* AIF1RX_LRCLK_FRC */
+#define WM8996_AIF1RX_LRCLK_FRC_MASK 0x0002 /* AIF1RX_LRCLK_FRC */
+#define WM8996_AIF1RX_LRCLK_FRC_SHIFT 1 /* AIF1RX_LRCLK_FRC */
+#define WM8996_AIF1RX_LRCLK_FRC_WIDTH 1 /* AIF1RX_LRCLK_FRC */
+#define WM8996_AIF1RX_LRCLK_MSTR 0x0001 /* AIF1RX_LRCLK_MSTR */
+#define WM8996_AIF1RX_LRCLK_MSTR_MASK 0x0001 /* AIF1RX_LRCLK_MSTR */
+#define WM8996_AIF1RX_LRCLK_MSTR_SHIFT 0 /* AIF1RX_LRCLK_MSTR */
+#define WM8996_AIF1RX_LRCLK_MSTR_WIDTH 1 /* AIF1RX_LRCLK_MSTR */
+
+/*
+ * R774 (0x306) - AIF1TX Data Configuration (1)
+ */
+#define WM8996_AIF1TX_WL_MASK 0xFF00 /* AIF1TX_WL - [15:8] */
+#define WM8996_AIF1TX_WL_SHIFT 8 /* AIF1TX_WL - [15:8] */
+#define WM8996_AIF1TX_WL_WIDTH 8 /* AIF1TX_WL - [15:8] */
+#define WM8996_AIF1TX_SLOT_LEN_MASK 0x00FF /* AIF1TX_SLOT_LEN - [7:0] */
+#define WM8996_AIF1TX_SLOT_LEN_SHIFT 0 /* AIF1TX_SLOT_LEN - [7:0] */
+#define WM8996_AIF1TX_SLOT_LEN_WIDTH 8 /* AIF1TX_SLOT_LEN - [7:0] */
+
+/*
+ * R775 (0x307) - AIF1TX Data Configuration (2)
+ */
+#define WM8996_AIF1TX_DAT_TRI 0x0001 /* AIF1TX_DAT_TRI */
+#define WM8996_AIF1TX_DAT_TRI_MASK 0x0001 /* AIF1TX_DAT_TRI */
+#define WM8996_AIF1TX_DAT_TRI_SHIFT 0 /* AIF1TX_DAT_TRI */
+#define WM8996_AIF1TX_DAT_TRI_WIDTH 1 /* AIF1TX_DAT_TRI */
+
+/*
+ * R776 (0x308) - AIF1RX Data Configuration
+ */
+#define WM8996_AIF1RX_WL_MASK 0xFF00 /* AIF1RX_WL - [15:8] */
+#define WM8996_AIF1RX_WL_SHIFT 8 /* AIF1RX_WL - [15:8] */
+#define WM8996_AIF1RX_WL_WIDTH 8 /* AIF1RX_WL - [15:8] */
+#define WM8996_AIF1RX_SLOT_LEN_MASK 0x00FF /* AIF1RX_SLOT_LEN - [7:0] */
+#define WM8996_AIF1RX_SLOT_LEN_SHIFT 0 /* AIF1RX_SLOT_LEN - [7:0] */
+#define WM8996_AIF1RX_SLOT_LEN_WIDTH 8 /* AIF1RX_SLOT_LEN - [7:0] */
+
+/*
+ * R777 (0x309) - AIF1TX Channel 0 Configuration
+ */
+#define WM8996_AIF1TX_CHAN0_DAT_INV 0x8000 /* AIF1TX_CHAN0_DAT_INV */
+#define WM8996_AIF1TX_CHAN0_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN0_DAT_INV */
+#define WM8996_AIF1TX_CHAN0_DAT_INV_SHIFT 15 /* AIF1TX_CHAN0_DAT_INV */
+#define WM8996_AIF1TX_CHAN0_DAT_INV_WIDTH 1 /* AIF1TX_CHAN0_DAT_INV */
+#define WM8996_AIF1TX_CHAN0_SPACING_MASK 0x7E00 /* AIF1TX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN0_SPACING_SHIFT 9 /* AIF1TX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN0_SPACING_WIDTH 6 /* AIF1TX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN0_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN0_SLOTS_SHIFT 6 /* AIF1TX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN0_SLOTS_WIDTH 3 /* AIF1TX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN0_START_SLOT_MASK 0x003F /* AIF1TX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN0_START_SLOT_SHIFT 0 /* AIF1TX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN0_START_SLOT_WIDTH 6 /* AIF1TX_CHAN0_START_SLOT - [5:0] */
+
+/*
+ * R778 (0x30A) - AIF1TX Channel 1 Configuration
+ */
+#define WM8996_AIF1TX_CHAN1_DAT_INV 0x8000 /* AIF1TX_CHAN1_DAT_INV */
+#define WM8996_AIF1TX_CHAN1_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN1_DAT_INV */
+#define WM8996_AIF1TX_CHAN1_DAT_INV_SHIFT 15 /* AIF1TX_CHAN1_DAT_INV */
+#define WM8996_AIF1TX_CHAN1_DAT_INV_WIDTH 1 /* AIF1TX_CHAN1_DAT_INV */
+#define WM8996_AIF1TX_CHAN1_SPACING_MASK 0x7E00 /* AIF1TX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN1_SPACING_SHIFT 9 /* AIF1TX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN1_SPACING_WIDTH 6 /* AIF1TX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN1_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN1_SLOTS_SHIFT 6 /* AIF1TX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN1_SLOTS_WIDTH 3 /* AIF1TX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN1_START_SLOT_MASK 0x003F /* AIF1TX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN1_START_SLOT_SHIFT 0 /* AIF1TX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN1_START_SLOT_WIDTH 6 /* AIF1TX_CHAN1_START_SLOT - [5:0] */
+
+/*
+ * R779 (0x30B) - AIF1TX Channel 2 Configuration
+ */
+#define WM8996_AIF1TX_CHAN2_DAT_INV 0x8000 /* AIF1TX_CHAN2_DAT_INV */
+#define WM8996_AIF1TX_CHAN2_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN2_DAT_INV */
+#define WM8996_AIF1TX_CHAN2_DAT_INV_SHIFT 15 /* AIF1TX_CHAN2_DAT_INV */
+#define WM8996_AIF1TX_CHAN2_DAT_INV_WIDTH 1 /* AIF1TX_CHAN2_DAT_INV */
+#define WM8996_AIF1TX_CHAN2_SPACING_MASK 0x7E00 /* AIF1TX_CHAN2_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN2_SPACING_SHIFT 9 /* AIF1TX_CHAN2_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN2_SPACING_WIDTH 6 /* AIF1TX_CHAN2_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN2_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN2_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN2_SLOTS_SHIFT 6 /* AIF1TX_CHAN2_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN2_SLOTS_WIDTH 3 /* AIF1TX_CHAN2_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN2_START_SLOT_MASK 0x003F /* AIF1TX_CHAN2_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN2_START_SLOT_SHIFT 0 /* AIF1TX_CHAN2_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN2_START_SLOT_WIDTH 6 /* AIF1TX_CHAN2_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN3_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN3_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN3_SLOTS_SHIFT 6 /* AIF1TX_CHAN3_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN3_SLOTS_WIDTH 3 /* AIF1TX_CHAN3_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN3_START_SLOT_MASK 0x003F /* AIF1TX_CHAN3_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN3_START_SLOT_SHIFT 0 /* AIF1TX_CHAN3_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN3_START_SLOT_WIDTH 6 /* AIF1TX_CHAN3_START_SLOT - [5:0] */
+
+/*
+ * R781 (0x30D) - AIF1TX Channel 4 Configuration
+ */
+#define WM8996_AIF1TX_CHAN4_DAT_INV 0x8000 /* AIF1TX_CHAN4_DAT_INV */
+#define WM8996_AIF1TX_CHAN4_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN4_DAT_INV */
+#define WM8996_AIF1TX_CHAN4_DAT_INV_SHIFT 15 /* AIF1TX_CHAN4_DAT_INV */
+#define WM8996_AIF1TX_CHAN4_DAT_INV_WIDTH 1 /* AIF1TX_CHAN4_DAT_INV */
+#define WM8996_AIF1TX_CHAN4_SPACING_MASK 0x7E00 /* AIF1TX_CHAN4_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN4_SPACING_SHIFT 9 /* AIF1TX_CHAN4_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN4_SPACING_WIDTH 6 /* AIF1TX_CHAN4_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN4_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN4_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN4_SLOTS_SHIFT 6 /* AIF1TX_CHAN4_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN4_SLOTS_WIDTH 3 /* AIF1TX_CHAN4_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN4_START_SLOT_MASK 0x003F /* AIF1TX_CHAN4_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN4_START_SLOT_SHIFT 0 /* AIF1TX_CHAN4_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN4_START_SLOT_WIDTH 6 /* AIF1TX_CHAN4_START_SLOT - [5:0] */
+
+/*
+ * R782 (0x30E) - AIF1TX Channel 5 Configuration
+ */
+#define WM8996_AIF1TX_CHAN5_DAT_INV 0x8000 /* AIF1TX_CHAN5_DAT_INV */
+#define WM8996_AIF1TX_CHAN5_DAT_INV_MASK 0x8000 /* AIF1TX_CHAN5_DAT_INV */
+#define WM8996_AIF1TX_CHAN5_DAT_INV_SHIFT 15 /* AIF1TX_CHAN5_DAT_INV */
+#define WM8996_AIF1TX_CHAN5_DAT_INV_WIDTH 1 /* AIF1TX_CHAN5_DAT_INV */
+#define WM8996_AIF1TX_CHAN5_SPACING_MASK 0x7E00 /* AIF1TX_CHAN5_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN5_SPACING_SHIFT 9 /* AIF1TX_CHAN5_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN5_SPACING_WIDTH 6 /* AIF1TX_CHAN5_SPACING - [14:9] */
+#define WM8996_AIF1TX_CHAN5_SLOTS_MASK 0x01C0 /* AIF1TX_CHAN5_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN5_SLOTS_SHIFT 6 /* AIF1TX_CHAN5_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN5_SLOTS_WIDTH 3 /* AIF1TX_CHAN5_SLOTS - [8:6] */
+#define WM8996_AIF1TX_CHAN5_START_SLOT_MASK 0x003F /* AIF1TX_CHAN5_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN5_START_SLOT_SHIFT 0 /* AIF1TX_CHAN5_START_SLOT - [5:0] */
+#define WM8996_AIF1TX_CHAN5_START_SLOT_WIDTH 6 /* AIF1TX_CHAN5_START_SLOT - [5:0] */
+
+/*
+ * R783 (0x30F) - AIF1RX Channel 0 Configuration
+ */
+#define WM8996_AIF1RX_CHAN0_DAT_INV 0x8000 /* AIF1RX_CHAN0_DAT_INV */
+#define WM8996_AIF1RX_CHAN0_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN0_DAT_INV */
+#define WM8996_AIF1RX_CHAN0_DAT_INV_SHIFT 15 /* AIF1RX_CHAN0_DAT_INV */
+#define WM8996_AIF1RX_CHAN0_DAT_INV_WIDTH 1 /* AIF1RX_CHAN0_DAT_INV */
+#define WM8996_AIF1RX_CHAN0_SPACING_MASK 0x7E00 /* AIF1RX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN0_SPACING_SHIFT 9 /* AIF1RX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN0_SPACING_WIDTH 6 /* AIF1RX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN0_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN0_SLOTS_SHIFT 6 /* AIF1RX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN0_SLOTS_WIDTH 3 /* AIF1RX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN0_START_SLOT_MASK 0x003F /* AIF1RX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN0_START_SLOT_SHIFT 0 /* AIF1RX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN0_START_SLOT_WIDTH 6 /* AIF1RX_CHAN0_START_SLOT - [5:0] */
+
+/*
+ * R784 (0x310) - AIF1RX Channel 1 Configuration
+ */
+#define WM8996_AIF1RX_CHAN1_DAT_INV 0x8000 /* AIF1RX_CHAN1_DAT_INV */
+#define WM8996_AIF1RX_CHAN1_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN1_DAT_INV */
+#define WM8996_AIF1RX_CHAN1_DAT_INV_SHIFT 15 /* AIF1RX_CHAN1_DAT_INV */
+#define WM8996_AIF1RX_CHAN1_DAT_INV_WIDTH 1 /* AIF1RX_CHAN1_DAT_INV */
+#define WM8996_AIF1RX_CHAN1_SPACING_MASK 0x7E00 /* AIF1RX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN1_SPACING_SHIFT 9 /* AIF1RX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN1_SPACING_WIDTH 6 /* AIF1RX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN1_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN1_SLOTS_SHIFT 6 /* AIF1RX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN1_SLOTS_WIDTH 3 /* AIF1RX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN1_START_SLOT_MASK 0x003F /* AIF1RX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN1_START_SLOT_SHIFT 0 /* AIF1RX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN1_START_SLOT_WIDTH 6 /* AIF1RX_CHAN1_START_SLOT - [5:0] */
+
+/*
+ * R785 (0x311) - AIF1RX Channel 2 Configuration
+ */
+#define WM8996_AIF1RX_CHAN2_DAT_INV 0x8000 /* AIF1RX_CHAN2_DAT_INV */
+#define WM8996_AIF1RX_CHAN2_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN2_DAT_INV */
+#define WM8996_AIF1RX_CHAN2_DAT_INV_SHIFT 15 /* AIF1RX_CHAN2_DAT_INV */
+#define WM8996_AIF1RX_CHAN2_DAT_INV_WIDTH 1 /* AIF1RX_CHAN2_DAT_INV */
+#define WM8996_AIF1RX_CHAN2_SPACING_MASK 0x7E00 /* AIF1RX_CHAN2_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN2_SPACING_SHIFT 9 /* AIF1RX_CHAN2_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN2_SPACING_WIDTH 6 /* AIF1RX_CHAN2_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN2_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN2_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN2_SLOTS_SHIFT 6 /* AIF1RX_CHAN2_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN2_SLOTS_WIDTH 3 /* AIF1RX_CHAN2_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN2_START_SLOT_MASK 0x003F /* AIF1RX_CHAN2_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN2_START_SLOT_SHIFT 0 /* AIF1RX_CHAN2_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN2_START_SLOT_WIDTH 6 /* AIF1RX_CHAN2_START_SLOT - [5:0] */
+
+/*
+ * R786 (0x312) - AIF1RX Channel 3 Configuration
+ */
+#define WM8996_AIF1RX_CHAN3_DAT_INV 0x8000 /* AIF1RX_CHAN3_DAT_INV */
+#define WM8996_AIF1RX_CHAN3_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN3_DAT_INV */
+#define WM8996_AIF1RX_CHAN3_DAT_INV_SHIFT 15 /* AIF1RX_CHAN3_DAT_INV */
+#define WM8996_AIF1RX_CHAN3_DAT_INV_WIDTH 1 /* AIF1RX_CHAN3_DAT_INV */
+#define WM8996_AIF1RX_CHAN3_SPACING_MASK 0x7E00 /* AIF1RX_CHAN3_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN3_SPACING_SHIFT 9 /* AIF1RX_CHAN3_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN3_SPACING_WIDTH 6 /* AIF1RX_CHAN3_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN3_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN3_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN3_SLOTS_SHIFT 6 /* AIF1RX_CHAN3_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN3_SLOTS_WIDTH 3 /* AIF1RX_CHAN3_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN3_START_SLOT_MASK 0x003F /* AIF1RX_CHAN3_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN3_START_SLOT_SHIFT 0 /* AIF1RX_CHAN3_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN3_START_SLOT_WIDTH 6 /* AIF1RX_CHAN3_START_SLOT - [5:0] */
+
+/*
+ * R787 (0x313) - AIF1RX Channel 4 Configuration
+ */
+#define WM8996_AIF1RX_CHAN4_DAT_INV 0x8000 /* AIF1RX_CHAN4_DAT_INV */
+#define WM8996_AIF1RX_CHAN4_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN4_DAT_INV */
+#define WM8996_AIF1RX_CHAN4_DAT_INV_SHIFT 15 /* AIF1RX_CHAN4_DAT_INV */
+#define WM8996_AIF1RX_CHAN4_DAT_INV_WIDTH 1 /* AIF1RX_CHAN4_DAT_INV */
+#define WM8996_AIF1RX_CHAN4_SPACING_MASK 0x7E00 /* AIF1RX_CHAN4_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN4_SPACING_SHIFT 9 /* AIF1RX_CHAN4_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN4_SPACING_WIDTH 6 /* AIF1RX_CHAN4_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN4_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN4_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN4_SLOTS_SHIFT 6 /* AIF1RX_CHAN4_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN4_SLOTS_WIDTH 3 /* AIF1RX_CHAN4_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN4_START_SLOT_MASK 0x003F /* AIF1RX_CHAN4_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN4_START_SLOT_SHIFT 0 /* AIF1RX_CHAN4_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN4_START_SLOT_WIDTH 6 /* AIF1RX_CHAN4_START_SLOT - [5:0] */
+
+/*
+ * R788 (0x314) - AIF1RX Channel 5 Configuration
+ */
+#define WM8996_AIF1RX_CHAN5_DAT_INV 0x8000 /* AIF1RX_CHAN5_DAT_INV */
+#define WM8996_AIF1RX_CHAN5_DAT_INV_MASK 0x8000 /* AIF1RX_CHAN5_DAT_INV */
+#define WM8996_AIF1RX_CHAN5_DAT_INV_SHIFT 15 /* AIF1RX_CHAN5_DAT_INV */
+#define WM8996_AIF1RX_CHAN5_DAT_INV_WIDTH 1 /* AIF1RX_CHAN5_DAT_INV */
+#define WM8996_AIF1RX_CHAN5_SPACING_MASK 0x7E00 /* AIF1RX_CHAN5_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN5_SPACING_SHIFT 9 /* AIF1RX_CHAN5_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN5_SPACING_WIDTH 6 /* AIF1RX_CHAN5_SPACING - [14:9] */
+#define WM8996_AIF1RX_CHAN5_SLOTS_MASK 0x01C0 /* AIF1RX_CHAN5_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN5_SLOTS_SHIFT 6 /* AIF1RX_CHAN5_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN5_SLOTS_WIDTH 3 /* AIF1RX_CHAN5_SLOTS - [8:6] */
+#define WM8996_AIF1RX_CHAN5_START_SLOT_MASK 0x003F /* AIF1RX_CHAN5_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN5_START_SLOT_SHIFT 0 /* AIF1RX_CHAN5_START_SLOT - [5:0] */
+#define WM8996_AIF1RX_CHAN5_START_SLOT_WIDTH 6 /* AIF1RX_CHAN5_START_SLOT - [5:0] */
+
+/*
+ * R789 (0x315) - AIF1RX Mono Configuration
+ */
+#define WM8996_AIF1RX_CHAN4_MONO_MODE 0x0004 /* AIF1RX_CHAN4_MONO_MODE */
+#define WM8996_AIF1RX_CHAN4_MONO_MODE_MASK 0x0004 /* AIF1RX_CHAN4_MONO_MODE */
+#define WM8996_AIF1RX_CHAN4_MONO_MODE_SHIFT 2 /* AIF1RX_CHAN4_MONO_MODE */
+#define WM8996_AIF1RX_CHAN4_MONO_MODE_WIDTH 1 /* AIF1RX_CHAN4_MONO_MODE */
+#define WM8996_AIF1RX_CHAN2_MONO_MODE 0x0002 /* AIF1RX_CHAN2_MONO_MODE */
+#define WM8996_AIF1RX_CHAN2_MONO_MODE_MASK 0x0002 /* AIF1RX_CHAN2_MONO_MODE */
+#define WM8996_AIF1RX_CHAN2_MONO_MODE_SHIFT 1 /* AIF1RX_CHAN2_MONO_MODE */
+#define WM8996_AIF1RX_CHAN2_MONO_MODE_WIDTH 1 /* AIF1RX_CHAN2_MONO_MODE */
+#define WM8996_AIF1RX_CHAN0_MONO_MODE 0x0001 /* AIF1RX_CHAN0_MONO_MODE */
+#define WM8996_AIF1RX_CHAN0_MONO_MODE_MASK 0x0001 /* AIF1RX_CHAN0_MONO_MODE */
+#define WM8996_AIF1RX_CHAN0_MONO_MODE_SHIFT 0 /* AIF1RX_CHAN0_MONO_MODE */
+#define WM8996_AIF1RX_CHAN0_MONO_MODE_WIDTH 1 /* AIF1RX_CHAN0_MONO_MODE */
+
+/*
+ * R794 (0x31A) - AIF1TX Test
+ */
+#define WM8996_AIF1TX45_DITHER_ENA 0x0004 /* AIF1TX45_DITHER_ENA */
+#define WM8996_AIF1TX45_DITHER_ENA_MASK 0x0004 /* AIF1TX45_DITHER_ENA */
+#define WM8996_AIF1TX45_DITHER_ENA_SHIFT 2 /* AIF1TX45_DITHER_ENA */
+#define WM8996_AIF1TX45_DITHER_ENA_WIDTH 1 /* AIF1TX45_DITHER_ENA */
+#define WM8996_AIF1TX23_DITHER_ENA 0x0002 /* AIF1TX23_DITHER_ENA */
+#define WM8996_AIF1TX23_DITHER_ENA_MASK 0x0002 /* AIF1TX23_DITHER_ENA */
+#define WM8996_AIF1TX23_DITHER_ENA_SHIFT 1 /* AIF1TX23_DITHER_ENA */
+#define WM8996_AIF1TX23_DITHER_ENA_WIDTH 1 /* AIF1TX23_DITHER_ENA */
+#define WM8996_AIF1TX01_DITHER_ENA 0x0001 /* AIF1TX01_DITHER_ENA */
+#define WM8996_AIF1TX01_DITHER_ENA_MASK 0x0001 /* AIF1TX01_DITHER_ENA */
+#define WM8996_AIF1TX01_DITHER_ENA_SHIFT 0 /* AIF1TX01_DITHER_ENA */
+#define WM8996_AIF1TX01_DITHER_ENA_WIDTH 1 /* AIF1TX01_DITHER_ENA */
+
+/*
+ * R800 (0x320) - AIF2 Control
+ */
+#define WM8996_AIF2_TRI 0x0004 /* AIF2_TRI */
+#define WM8996_AIF2_TRI_MASK 0x0004 /* AIF2_TRI */
+#define WM8996_AIF2_TRI_SHIFT 2 /* AIF2_TRI */
+#define WM8996_AIF2_TRI_WIDTH 1 /* AIF2_TRI */
+#define WM8996_AIF2_FMT_MASK 0x0003 /* AIF2_FMT - [1:0] */
+#define WM8996_AIF2_FMT_SHIFT 0 /* AIF2_FMT - [1:0] */
+#define WM8996_AIF2_FMT_WIDTH 2 /* AIF2_FMT - [1:0] */
+
+/*
+ * R801 (0x321) - AIF2 BCLK
+ */
+#define WM8996_AIF2_BCLK_INV 0x0400 /* AIF2_BCLK_INV */
+#define WM8996_AIF2_BCLK_INV_MASK 0x0400 /* AIF2_BCLK_INV */
+#define WM8996_AIF2_BCLK_INV_SHIFT 10 /* AIF2_BCLK_INV */
+#define WM8996_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */
+#define WM8996_AIF2_BCLK_FRC 0x0200 /* AIF2_BCLK_FRC */
+#define WM8996_AIF2_BCLK_FRC_MASK 0x0200 /* AIF2_BCLK_FRC */
+#define WM8996_AIF2_BCLK_FRC_SHIFT 9 /* AIF2_BCLK_FRC */
+#define WM8996_AIF2_BCLK_FRC_WIDTH 1 /* AIF2_BCLK_FRC */
+#define WM8996_AIF2_BCLK_MSTR 0x0100 /* AIF2_BCLK_MSTR */
+#define WM8996_AIF2_BCLK_MSTR_MASK 0x0100 /* AIF2_BCLK_MSTR */
+#define WM8996_AIF2_BCLK_MSTR_SHIFT 8 /* AIF2_BCLK_MSTR */
+#define WM8996_AIF2_BCLK_MSTR_WIDTH 1 /* AIF2_BCLK_MSTR */
+#define WM8996_AIF2_BCLK_DIV_MASK 0x000F /* AIF2_BCLK_DIV - [3:0] */
+#define WM8996_AIF2_BCLK_DIV_SHIFT 0 /* AIF2_BCLK_DIV - [3:0] */
+#define WM8996_AIF2_BCLK_DIV_WIDTH 4 /* AIF2_BCLK_DIV - [3:0] */
+
+/*
+ * R802 (0x322) - AIF2 TX LRCLK(1)
+ */
+#define WM8996_AIF2TX_RATE_MASK 0x07FF /* AIF2TX_RATE - [10:0] */
+#define WM8996_AIF2TX_RATE_SHIFT 0 /* AIF2TX_RATE - [10:0] */
+#define WM8996_AIF2TX_RATE_WIDTH 11 /* AIF2TX_RATE - [10:0] */
+
+/*
+ * R803 (0x323) - AIF2 TX LRCLK(2)
+ */
+#define WM8996_AIF2TX_LRCLK_MODE 0x0008 /* AIF2TX_LRCLK_MODE */
+#define WM8996_AIF2TX_LRCLK_MODE_MASK 0x0008 /* AIF2TX_LRCLK_MODE */
+#define WM8996_AIF2TX_LRCLK_MODE_SHIFT 3 /* AIF2TX_LRCLK_MODE */
+#define WM8996_AIF2TX_LRCLK_MODE_WIDTH 1 /* AIF2TX_LRCLK_MODE */
+#define WM8996_AIF2TX_LRCLK_INV 0x0004 /* AIF2TX_LRCLK_INV */
+#define WM8996_AIF2TX_LRCLK_INV_MASK 0x0004 /* AIF2TX_LRCLK_INV */
+#define WM8996_AIF2TX_LRCLK_INV_SHIFT 2 /* AIF2TX_LRCLK_INV */
+#define WM8996_AIF2TX_LRCLK_INV_WIDTH 1 /* AIF2TX_LRCLK_INV */
+#define WM8996_AIF2TX_LRCLK_FRC 0x0002 /* AIF2TX_LRCLK_FRC */
+#define WM8996_AIF2TX_LRCLK_FRC_MASK 0x0002 /* AIF2TX_LRCLK_FRC */
+#define WM8996_AIF2TX_LRCLK_FRC_SHIFT 1 /* AIF2TX_LRCLK_FRC */
+#define WM8996_AIF2TX_LRCLK_FRC_WIDTH 1 /* AIF2TX_LRCLK_FRC */
+#define WM8996_AIF2TX_LRCLK_MSTR 0x0001 /* AIF2TX_LRCLK_MSTR */
+#define WM8996_AIF2TX_LRCLK_MSTR_MASK 0x0001 /* AIF2TX_LRCLK_MSTR */
+#define WM8996_AIF2TX_LRCLK_MSTR_SHIFT 0 /* AIF2TX_LRCLK_MSTR */
+#define WM8996_AIF2TX_LRCLK_MSTR_WIDTH 1 /* AIF2TX_LRCLK_MSTR */
+
+/*
+ * R804 (0x324) - AIF2 RX LRCLK(1)
+ */
+#define WM8996_AIF2RX_RATE_MASK 0x07FF /* AIF2RX_RATE - [10:0] */
+#define WM8996_AIF2RX_RATE_SHIFT 0 /* AIF2RX_RATE - [10:0] */
+#define WM8996_AIF2RX_RATE_WIDTH 11 /* AIF2RX_RATE - [10:0] */
+
+/*
+ * R805 (0x325) - AIF2 RX LRCLK(2)
+ */
+#define WM8996_AIF2RX_LRCLK_INV 0x0004 /* AIF2RX_LRCLK_INV */
+#define WM8996_AIF2RX_LRCLK_INV_MASK 0x0004 /* AIF2RX_LRCLK_INV */
+#define WM8996_AIF2RX_LRCLK_INV_SHIFT 2 /* AIF2RX_LRCLK_INV */
+#define WM8996_AIF2RX_LRCLK_INV_WIDTH 1 /* AIF2RX_LRCLK_INV */
+#define WM8996_AIF2RX_LRCLK_FRC 0x0002 /* AIF2RX_LRCLK_FRC */
+#define WM8996_AIF2RX_LRCLK_FRC_MASK 0x0002 /* AIF2RX_LRCLK_FRC */
+#define WM8996_AIF2RX_LRCLK_FRC_SHIFT 1 /* AIF2RX_LRCLK_FRC */
+#define WM8996_AIF2RX_LRCLK_FRC_WIDTH 1 /* AIF2RX_LRCLK_FRC */
+#define WM8996_AIF2RX_LRCLK_MSTR 0x0001 /* AIF2RX_LRCLK_MSTR */
+#define WM8996_AIF2RX_LRCLK_MSTR_MASK 0x0001 /* AIF2RX_LRCLK_MSTR */
+#define WM8996_AIF2RX_LRCLK_MSTR_SHIFT 0 /* AIF2RX_LRCLK_MSTR */
+#define WM8996_AIF2RX_LRCLK_MSTR_WIDTH 1 /* AIF2RX_LRCLK_MSTR */
+
+/*
+ * R806 (0x326) - AIF2TX Data Configuration (1)
+ */
+#define WM8996_AIF2TX_WL_MASK 0xFF00 /* AIF2TX_WL - [15:8] */
+#define WM8996_AIF2TX_WL_SHIFT 8 /* AIF2TX_WL - [15:8] */
+#define WM8996_AIF2TX_WL_WIDTH 8 /* AIF2TX_WL - [15:8] */
+#define WM8996_AIF2TX_SLOT_LEN_MASK 0x00FF /* AIF2TX_SLOT_LEN - [7:0] */
+#define WM8996_AIF2TX_SLOT_LEN_SHIFT 0 /* AIF2TX_SLOT_LEN - [7:0] */
+#define WM8996_AIF2TX_SLOT_LEN_WIDTH 8 /* AIF2TX_SLOT_LEN - [7:0] */
+
+/*
+ * R807 (0x327) - AIF2TX Data Configuration (2)
+ */
+#define WM8996_AIF2TX_DAT_TRI 0x0001 /* AIF2TX_DAT_TRI */
+#define WM8996_AIF2TX_DAT_TRI_MASK 0x0001 /* AIF2TX_DAT_TRI */
+#define WM8996_AIF2TX_DAT_TRI_SHIFT 0 /* AIF2TX_DAT_TRI */
+#define WM8996_AIF2TX_DAT_TRI_WIDTH 1 /* AIF2TX_DAT_TRI */
+
+/*
+ * R808 (0x328) - AIF2RX Data Configuration
+ */
+#define WM8996_AIF2RX_WL_MASK 0xFF00 /* AIF2RX_WL - [15:8] */
+#define WM8996_AIF2RX_WL_SHIFT 8 /* AIF2RX_WL - [15:8] */
+#define WM8996_AIF2RX_WL_WIDTH 8 /* AIF2RX_WL - [15:8] */
+#define WM8996_AIF2RX_SLOT_LEN_MASK 0x00FF /* AIF2RX_SLOT_LEN - [7:0] */
+#define WM8996_AIF2RX_SLOT_LEN_SHIFT 0 /* AIF2RX_SLOT_LEN - [7:0] */
+#define WM8996_AIF2RX_SLOT_LEN_WIDTH 8 /* AIF2RX_SLOT_LEN - [7:0] */
+
+/*
+ * R809 (0x329) - AIF2TX Channel 0 Configuration
+ */
+#define WM8996_AIF2TX_CHAN0_DAT_INV 0x8000 /* AIF2TX_CHAN0_DAT_INV */
+#define WM8996_AIF2TX_CHAN0_DAT_INV_MASK 0x8000 /* AIF2TX_CHAN0_DAT_INV */
+#define WM8996_AIF2TX_CHAN0_DAT_INV_SHIFT 15 /* AIF2TX_CHAN0_DAT_INV */
+#define WM8996_AIF2TX_CHAN0_DAT_INV_WIDTH 1 /* AIF2TX_CHAN0_DAT_INV */
+#define WM8996_AIF2TX_CHAN0_SPACING_MASK 0x7E00 /* AIF2TX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF2TX_CHAN0_SPACING_SHIFT 9 /* AIF2TX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF2TX_CHAN0_SPACING_WIDTH 6 /* AIF2TX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF2TX_CHAN0_SLOTS_MASK 0x01C0 /* AIF2TX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF2TX_CHAN0_SLOTS_SHIFT 6 /* AIF2TX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF2TX_CHAN0_SLOTS_WIDTH 3 /* AIF2TX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF2TX_CHAN0_START_SLOT_MASK 0x003F /* AIF2TX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF2TX_CHAN0_START_SLOT_SHIFT 0 /* AIF2TX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF2TX_CHAN0_START_SLOT_WIDTH 6 /* AIF2TX_CHAN0_START_SLOT - [5:0] */
+
+/*
+ * R810 (0x32A) - AIF2TX Channel 1 Configuration
+ */
+#define WM8996_AIF2TX_CHAN1_DAT_INV 0x8000 /* AIF2TX_CHAN1_DAT_INV */
+#define WM8996_AIF2TX_CHAN1_DAT_INV_MASK 0x8000 /* AIF2TX_CHAN1_DAT_INV */
+#define WM8996_AIF2TX_CHAN1_DAT_INV_SHIFT 15 /* AIF2TX_CHAN1_DAT_INV */
+#define WM8996_AIF2TX_CHAN1_DAT_INV_WIDTH 1 /* AIF2TX_CHAN1_DAT_INV */
+#define WM8996_AIF2TX_CHAN1_SPACING_MASK 0x7E00 /* AIF2TX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF2TX_CHAN1_SPACING_SHIFT 9 /* AIF2TX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF2TX_CHAN1_SPACING_WIDTH 6 /* AIF2TX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF2TX_CHAN1_SLOTS_MASK 0x01C0 /* AIF2TX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF2TX_CHAN1_SLOTS_SHIFT 6 /* AIF2TX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF2TX_CHAN1_SLOTS_WIDTH 3 /* AIF2TX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF2TX_CHAN1_START_SLOT_MASK 0x003F /* AIF2TX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF2TX_CHAN1_START_SLOT_SHIFT 0 /* AIF2TX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF2TX_CHAN1_START_SLOT_WIDTH 6 /* AIF2TX_CHAN1_START_SLOT - [5:0] */
+
+/*
+ * R811 (0x32B) - AIF2RX Channel 0 Configuration
+ */
+#define WM8996_AIF2RX_CHAN0_DAT_INV 0x8000 /* AIF2RX_CHAN0_DAT_INV */
+#define WM8996_AIF2RX_CHAN0_DAT_INV_MASK 0x8000 /* AIF2RX_CHAN0_DAT_INV */
+#define WM8996_AIF2RX_CHAN0_DAT_INV_SHIFT 15 /* AIF2RX_CHAN0_DAT_INV */
+#define WM8996_AIF2RX_CHAN0_DAT_INV_WIDTH 1 /* AIF2RX_CHAN0_DAT_INV */
+#define WM8996_AIF2RX_CHAN0_SPACING_MASK 0x7E00 /* AIF2RX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF2RX_CHAN0_SPACING_SHIFT 9 /* AIF2RX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF2RX_CHAN0_SPACING_WIDTH 6 /* AIF2RX_CHAN0_SPACING - [14:9] */
+#define WM8996_AIF2RX_CHAN0_SLOTS_MASK 0x01C0 /* AIF2RX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF2RX_CHAN0_SLOTS_SHIFT 6 /* AIF2RX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF2RX_CHAN0_SLOTS_WIDTH 3 /* AIF2RX_CHAN0_SLOTS - [8:6] */
+#define WM8996_AIF2RX_CHAN0_START_SLOT_MASK 0x003F /* AIF2RX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF2RX_CHAN0_START_SLOT_SHIFT 0 /* AIF2RX_CHAN0_START_SLOT - [5:0] */
+#define WM8996_AIF2RX_CHAN0_START_SLOT_WIDTH 6 /* AIF2RX_CHAN0_START_SLOT - [5:0] */
+
+/*
+ * R812 (0x32C) - AIF2RX Channel 1 Configuration
+ */
+#define WM8996_AIF2RX_CHAN1_DAT_INV 0x8000 /* AIF2RX_CHAN1_DAT_INV */
+#define WM8996_AIF2RX_CHAN1_DAT_INV_MASK 0x8000 /* AIF2RX_CHAN1_DAT_INV */
+#define WM8996_AIF2RX_CHAN1_DAT_INV_SHIFT 15 /* AIF2RX_CHAN1_DAT_INV */
+#define WM8996_AIF2RX_CHAN1_DAT_INV_WIDTH 1 /* AIF2RX_CHAN1_DAT_INV */
+#define WM8996_AIF2RX_CHAN1_SPACING_MASK 0x7E00 /* AIF2RX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF2RX_CHAN1_SPACING_SHIFT 9 /* AIF2RX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF2RX_CHAN1_SPACING_WIDTH 6 /* AIF2RX_CHAN1_SPACING - [14:9] */
+#define WM8996_AIF2RX_CHAN1_SLOTS_MASK 0x01C0 /* AIF2RX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF2RX_CHAN1_SLOTS_SHIFT 6 /* AIF2RX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF2RX_CHAN1_SLOTS_WIDTH 3 /* AIF2RX_CHAN1_SLOTS - [8:6] */
+#define WM8996_AIF2RX_CHAN1_START_SLOT_MASK 0x003F /* AIF2RX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF2RX_CHAN1_START_SLOT_SHIFT 0 /* AIF2RX_CHAN1_START_SLOT - [5:0] */
+#define WM8996_AIF2RX_CHAN1_START_SLOT_WIDTH 6 /* AIF2RX_CHAN1_START_SLOT - [5:0] */
+
+/*
+ * R813 (0x32D) - AIF2RX Mono Configuration
+ */
+#define WM8996_AIF2RX_CHAN0_MONO_MODE 0x0001 /* AIF2RX_CHAN0_MONO_MODE */
+#define WM8996_AIF2RX_CHAN0_MONO_MODE_MASK 0x0001 /* AIF2RX_CHAN0_MONO_MODE */
+#define WM8996_AIF2RX_CHAN0_MONO_MODE_SHIFT 0 /* AIF2RX_CHAN0_MONO_MODE */
+#define WM8996_AIF2RX_CHAN0_MONO_MODE_WIDTH 1 /* AIF2RX_CHAN0_MONO_MODE */
+
+/*
+ * R815 (0x32F) - AIF2TX Test
+ */
+#define WM8996_AIF2TX_DITHER_ENA 0x0001 /* AIF2TX_DITHER_ENA */
+#define WM8996_AIF2TX_DITHER_ENA_MASK 0x0001 /* AIF2TX_DITHER_ENA */
+#define WM8996_AIF2TX_DITHER_ENA_SHIFT 0 /* AIF2TX_DITHER_ENA */
+#define WM8996_AIF2TX_DITHER_ENA_WIDTH 1 /* AIF2TX_DITHER_ENA */
+
+/*
+ * R1024 (0x400) - DSP1 TX Left Volume
+ */
+#define WM8996_DSP1TX_VU 0x0100 /* DSP1TX_VU */
+#define WM8996_DSP1TX_VU_MASK 0x0100 /* DSP1TX_VU */
+#define WM8996_DSP1TX_VU_SHIFT 8 /* DSP1TX_VU */
+#define WM8996_DSP1TX_VU_WIDTH 1 /* DSP1TX_VU */
+#define WM8996_DSP1TXL_VOL_MASK 0x00FF /* DSP1TXL_VOL - [7:0] */
+#define WM8996_DSP1TXL_VOL_SHIFT 0 /* DSP1TXL_VOL - [7:0] */
+#define WM8996_DSP1TXL_VOL_WIDTH 8 /* DSP1TXL_VOL - [7:0] */
+
+/*
+ * R1025 (0x401) - DSP1 TX Right Volume
+ */
+#define WM8996_DSP1TX_VU 0x0100 /* DSP1TX_VU */
+#define WM8996_DSP1TX_VU_MASK 0x0100 /* DSP1TX_VU */
+#define WM8996_DSP1TX_VU_SHIFT 8 /* DSP1TX_VU */
+#define WM8996_DSP1TX_VU_WIDTH 1 /* DSP1TX_VU */
+#define WM8996_DSP1TXR_VOL_MASK 0x00FF /* DSP1TXR_VOL - [7:0] */
+#define WM8996_DSP1TXR_VOL_SHIFT 0 /* DSP1TXR_VOL - [7:0] */
+#define WM8996_DSP1TXR_VOL_WIDTH 8 /* DSP1TXR_VOL - [7:0] */
+
+/*
+ * R1026 (0x402) - DSP1 RX Left Volume
+ */
+#define WM8996_DSP1RX_VU 0x0100 /* DSP1RX_VU */
+#define WM8996_DSP1RX_VU_MASK 0x0100 /* DSP1RX_VU */
+#define WM8996_DSP1RX_VU_SHIFT 8 /* DSP1RX_VU */
+#define WM8996_DSP1RX_VU_WIDTH 1 /* DSP1RX_VU */
+#define WM8996_DSP1RXL_VOL_MASK 0x00FF /* DSP1RXL_VOL - [7:0] */
+#define WM8996_DSP1RXL_VOL_SHIFT 0 /* DSP1RXL_VOL - [7:0] */
+#define WM8996_DSP1RXL_VOL_WIDTH 8 /* DSP1RXL_VOL - [7:0] */
+
+/*
+ * R1027 (0x403) - DSP1 RX Right Volume
+ */
+#define WM8996_DSP1RX_VU 0x0100 /* DSP1RX_VU */
+#define WM8996_DSP1RX_VU_MASK 0x0100 /* DSP1RX_VU */
+#define WM8996_DSP1RX_VU_SHIFT 8 /* DSP1RX_VU */
+#define WM8996_DSP1RX_VU_WIDTH 1 /* DSP1RX_VU */
+#define WM8996_DSP1RXR_VOL_MASK 0x00FF /* DSP1RXR_VOL - [7:0] */
+#define WM8996_DSP1RXR_VOL_SHIFT 0 /* DSP1RXR_VOL - [7:0] */
+#define WM8996_DSP1RXR_VOL_WIDTH 8 /* DSP1RXR_VOL - [7:0] */
+
+/*
+ * R1040 (0x410) - DSP1 TX Filters
+ */
+#define WM8996_DSP1TX_NF 0x2000 /* DSP1TX_NF */
+#define WM8996_DSP1TX_NF_MASK 0x2000 /* DSP1TX_NF */
+#define WM8996_DSP1TX_NF_SHIFT 13 /* DSP1TX_NF */
+#define WM8996_DSP1TX_NF_WIDTH 1 /* DSP1TX_NF */
+#define WM8996_DSP1TXL_HPF 0x1000 /* DSP1TXL_HPF */
+#define WM8996_DSP1TXL_HPF_MASK 0x1000 /* DSP1TXL_HPF */
+#define WM8996_DSP1TXL_HPF_SHIFT 12 /* DSP1TXL_HPF */
+#define WM8996_DSP1TXL_HPF_WIDTH 1 /* DSP1TXL_HPF */
+#define WM8996_DSP1TXR_HPF 0x0800 /* DSP1TXR_HPF */
+#define WM8996_DSP1TXR_HPF_MASK 0x0800 /* DSP1TXR_HPF */
+#define WM8996_DSP1TXR_HPF_SHIFT 11 /* DSP1TXR_HPF */
+#define WM8996_DSP1TXR_HPF_WIDTH 1 /* DSP1TXR_HPF */
+#define WM8996_DSP1TX_HPF_MODE_MASK 0x0018 /* DSP1TX_HPF_MODE - [4:3] */
+#define WM8996_DSP1TX_HPF_MODE_SHIFT 3 /* DSP1TX_HPF_MODE - [4:3] */
+#define WM8996_DSP1TX_HPF_MODE_WIDTH 2 /* DSP1TX_HPF_MODE - [4:3] */
+#define WM8996_DSP1TX_HPF_CUT_MASK 0x0007 /* DSP1TX_HPF_CUT - [2:0] */
+#define WM8996_DSP1TX_HPF_CUT_SHIFT 0 /* DSP1TX_HPF_CUT - [2:0] */
+#define WM8996_DSP1TX_HPF_CUT_WIDTH 3 /* DSP1TX_HPF_CUT - [2:0] */
+
+/*
+ * R1056 (0x420) - DSP1 RX Filters (1)
+ */
+#define WM8996_DSP1RX_MUTE 0x0200 /* DSP1RX_MUTE */
+#define WM8996_DSP1RX_MUTE_MASK 0x0200 /* DSP1RX_MUTE */
+#define WM8996_DSP1RX_MUTE_SHIFT 9 /* DSP1RX_MUTE */
+#define WM8996_DSP1RX_MUTE_WIDTH 1 /* DSP1RX_MUTE */
+#define WM8996_DSP1RX_MONO 0x0080 /* DSP1RX_MONO */
+#define WM8996_DSP1RX_MONO_MASK 0x0080 /* DSP1RX_MONO */
+#define WM8996_DSP1RX_MONO_SHIFT 7 /* DSP1RX_MONO */
+#define WM8996_DSP1RX_MONO_WIDTH 1 /* DSP1RX_MONO */
+#define WM8996_DSP1RX_MUTERATE 0x0020 /* DSP1RX_MUTERATE */
+#define WM8996_DSP1RX_MUTERATE_MASK 0x0020 /* DSP1RX_MUTERATE */
+#define WM8996_DSP1RX_MUTERATE_SHIFT 5 /* DSP1RX_MUTERATE */
+#define WM8996_DSP1RX_MUTERATE_WIDTH 1 /* DSP1RX_MUTERATE */
+#define WM8996_DSP1RX_UNMUTE_RAMP 0x0010 /* DSP1RX_UNMUTE_RAMP */
+#define WM8996_DSP1RX_UNMUTE_RAMP_MASK 0x0010 /* DSP1RX_UNMUTE_RAMP */
+#define WM8996_DSP1RX_UNMUTE_RAMP_SHIFT 4 /* DSP1RX_UNMUTE_RAMP */
+#define WM8996_DSP1RX_UNMUTE_RAMP_WIDTH 1 /* DSP1RX_UNMUTE_RAMP */
+
+/*
+ * R1057 (0x421) - DSP1 RX Filters (2)
+ */
+#define WM8996_DSP1RX_3D_GAIN_MASK 0x3E00 /* DSP1RX_3D_GAIN - [13:9] */
+#define WM8996_DSP1RX_3D_GAIN_SHIFT 9 /* DSP1RX_3D_GAIN - [13:9] */
+#define WM8996_DSP1RX_3D_GAIN_WIDTH 5 /* DSP1RX_3D_GAIN - [13:9] */
+#define WM8996_DSP1RX_3D_ENA 0x0100 /* DSP1RX_3D_ENA */
+#define WM8996_DSP1RX_3D_ENA_MASK 0x0100 /* DSP1RX_3D_ENA */
+#define WM8996_DSP1RX_3D_ENA_SHIFT 8 /* DSP1RX_3D_ENA */
+#define WM8996_DSP1RX_3D_ENA_WIDTH 1 /* DSP1RX_3D_ENA */
+
+/*
+ * R1088 (0x440) - DSP1 DRC (1)
+ */
+#define WM8996_DSP1DRC_SIG_DET_RMS_MASK 0xF800 /* DSP1DRC_SIG_DET_RMS - [15:11] */
+#define WM8996_DSP1DRC_SIG_DET_RMS_SHIFT 11 /* DSP1DRC_SIG_DET_RMS - [15:11] */
+#define WM8996_DSP1DRC_SIG_DET_RMS_WIDTH 5 /* DSP1DRC_SIG_DET_RMS - [15:11] */
+#define WM8996_DSP1DRC_SIG_DET_PK_MASK 0x0600 /* DSP1DRC_SIG_DET_PK - [10:9] */
+#define WM8996_DSP1DRC_SIG_DET_PK_SHIFT 9 /* DSP1DRC_SIG_DET_PK - [10:9] */
+#define WM8996_DSP1DRC_SIG_DET_PK_WIDTH 2 /* DSP1DRC_SIG_DET_PK - [10:9] */
+#define WM8996_DSP1DRC_NG_ENA 0x0100 /* DSP1DRC_NG_ENA */
+#define WM8996_DSP1DRC_NG_ENA_MASK 0x0100 /* DSP1DRC_NG_ENA */
+#define WM8996_DSP1DRC_NG_ENA_SHIFT 8 /* DSP1DRC_NG_ENA */
+#define WM8996_DSP1DRC_NG_ENA_WIDTH 1 /* DSP1DRC_NG_ENA */
+#define WM8996_DSP1DRC_SIG_DET_MODE 0x0080 /* DSP1DRC_SIG_DET_MODE */
+#define WM8996_DSP1DRC_SIG_DET_MODE_MASK 0x0080 /* DSP1DRC_SIG_DET_MODE */
+#define WM8996_DSP1DRC_SIG_DET_MODE_SHIFT 7 /* DSP1DRC_SIG_DET_MODE */
+#define WM8996_DSP1DRC_SIG_DET_MODE_WIDTH 1 /* DSP1DRC_SIG_DET_MODE */
+#define WM8996_DSP1DRC_SIG_DET 0x0040 /* DSP1DRC_SIG_DET */
+#define WM8996_DSP1DRC_SIG_DET_MASK 0x0040 /* DSP1DRC_SIG_DET */
+#define WM8996_DSP1DRC_SIG_DET_SHIFT 6 /* DSP1DRC_SIG_DET */
+#define WM8996_DSP1DRC_SIG_DET_WIDTH 1 /* DSP1DRC_SIG_DET */
+#define WM8996_DSP1DRC_KNEE2_OP_ENA 0x0020 /* DSP1DRC_KNEE2_OP_ENA */
+#define WM8996_DSP1DRC_KNEE2_OP_ENA_MASK 0x0020 /* DSP1DRC_KNEE2_OP_ENA */
+#define WM8996_DSP1DRC_KNEE2_OP_ENA_SHIFT 5 /* DSP1DRC_KNEE2_OP_ENA */
+#define WM8996_DSP1DRC_KNEE2_OP_ENA_WIDTH 1 /* DSP1DRC_KNEE2_OP_ENA */
+#define WM8996_DSP1DRC_QR 0x0010 /* DSP1DRC_QR */
+#define WM8996_DSP1DRC_QR_MASK 0x0010 /* DSP1DRC_QR */
+#define WM8996_DSP1DRC_QR_SHIFT 4 /* DSP1DRC_QR */
+#define WM8996_DSP1DRC_QR_WIDTH 1 /* DSP1DRC_QR */
+#define WM8996_DSP1DRC_ANTICLIP 0x0008 /* DSP1DRC_ANTICLIP */
+#define WM8996_DSP1DRC_ANTICLIP_MASK 0x0008 /* DSP1DRC_ANTICLIP */
+#define WM8996_DSP1DRC_ANTICLIP_SHIFT 3 /* DSP1DRC_ANTICLIP */
+#define WM8996_DSP1DRC_ANTICLIP_WIDTH 1 /* DSP1DRC_ANTICLIP */
+#define WM8996_DSP1RX_DRC_ENA 0x0004 /* DSP1RX_DRC_ENA */
+#define WM8996_DSP1RX_DRC_ENA_MASK 0x0004 /* DSP1RX_DRC_ENA */
+#define WM8996_DSP1RX_DRC_ENA_SHIFT 2 /* DSP1RX_DRC_ENA */
+#define WM8996_DSP1RX_DRC_ENA_WIDTH 1 /* DSP1RX_DRC_ENA */
+#define WM8996_DSP1TXL_DRC_ENA 0x0002 /* DSP1TXL_DRC_ENA */
+#define WM8996_DSP1TXL_DRC_ENA_MASK 0x0002 /* DSP1TXL_DRC_ENA */
+#define WM8996_DSP1TXL_DRC_ENA_SHIFT 1 /* DSP1TXL_DRC_ENA */
+#define WM8996_DSP1TXL_DRC_ENA_WIDTH 1 /* DSP1TXL_DRC_ENA */
+#define WM8996_DSP1TXR_DRC_ENA 0x0001 /* DSP1TXR_DRC_ENA */
+#define WM8996_DSP1TXR_DRC_ENA_MASK 0x0001 /* DSP1TXR_DRC_ENA */
+#define WM8996_DSP1TXR_DRC_ENA_SHIFT 0 /* DSP1TXR_DRC_ENA */
+#define WM8996_DSP1TXR_DRC_ENA_WIDTH 1 /* DSP1TXR_DRC_ENA */
+
+/*
+ * R1089 (0x441) - DSP1 DRC (2)
+ */
+#define WM8996_DSP1DRC_ATK_MASK 0x1E00 /* DSP1DRC_ATK - [12:9] */
+#define WM8996_DSP1DRC_ATK_SHIFT 9 /* DSP1DRC_ATK - [12:9] */
+#define WM8996_DSP1DRC_ATK_WIDTH 4 /* DSP1DRC_ATK - [12:9] */
+#define WM8996_DSP1DRC_DCY_MASK 0x01E0 /* DSP1DRC_DCY - [8:5] */
+#define WM8996_DSP1DRC_DCY_SHIFT 5 /* DSP1DRC_DCY - [8:5] */
+#define WM8996_DSP1DRC_DCY_WIDTH 4 /* DSP1DRC_DCY - [8:5] */
+#define WM8996_DSP1DRC_MINGAIN_MASK 0x001C /* DSP1DRC_MINGAIN - [4:2] */
+#define WM8996_DSP1DRC_MINGAIN_SHIFT 2 /* DSP1DRC_MINGAIN - [4:2] */
+#define WM8996_DSP1DRC_MINGAIN_WIDTH 3 /* DSP1DRC_MINGAIN - [4:2] */
+#define WM8996_DSP1DRC_MAXGAIN_MASK 0x0003 /* DSP1DRC_MAXGAIN - [1:0] */
+#define WM8996_DSP1DRC_MAXGAIN_SHIFT 0 /* DSP1DRC_MAXGAIN - [1:0] */
+#define WM8996_DSP1DRC_MAXGAIN_WIDTH 2 /* DSP1DRC_MAXGAIN - [1:0] */
+
+/*
+ * R1090 (0x442) - DSP1 DRC (3)
+ */
+#define WM8996_DSP1DRC_NG_MINGAIN_MASK 0xF000 /* DSP1DRC_NG_MINGAIN - [15:12] */
+#define WM8996_DSP1DRC_NG_MINGAIN_SHIFT 12 /* DSP1DRC_NG_MINGAIN - [15:12] */
+#define WM8996_DSP1DRC_NG_MINGAIN_WIDTH 4 /* DSP1DRC_NG_MINGAIN - [15:12] */
+#define WM8996_DSP1DRC_NG_EXP_MASK 0x0C00 /* DSP1DRC_NG_EXP - [11:10] */
+#define WM8996_DSP1DRC_NG_EXP_SHIFT 10 /* DSP1DRC_NG_EXP - [11:10] */
+#define WM8996_DSP1DRC_NG_EXP_WIDTH 2 /* DSP1DRC_NG_EXP - [11:10] */
+#define WM8996_DSP1DRC_QR_THR_MASK 0x0300 /* DSP1DRC_QR_THR - [9:8] */
+#define WM8996_DSP1DRC_QR_THR_SHIFT 8 /* DSP1DRC_QR_THR - [9:8] */
+#define WM8996_DSP1DRC_QR_THR_WIDTH 2 /* DSP1DRC_QR_THR - [9:8] */
+#define WM8996_DSP1DRC_QR_DCY_MASK 0x00C0 /* DSP1DRC_QR_DCY - [7:6] */
+#define WM8996_DSP1DRC_QR_DCY_SHIFT 6 /* DSP1DRC_QR_DCY - [7:6] */
+#define WM8996_DSP1DRC_QR_DCY_WIDTH 2 /* DSP1DRC_QR_DCY - [7:6] */
+#define WM8996_DSP1DRC_HI_COMP_MASK 0x0038 /* DSP1DRC_HI_COMP - [5:3] */
+#define WM8996_DSP1DRC_HI_COMP_SHIFT 3 /* DSP1DRC_HI_COMP - [5:3] */
+#define WM8996_DSP1DRC_HI_COMP_WIDTH 3 /* DSP1DRC_HI_COMP - [5:3] */
+#define WM8996_DSP1DRC_LO_COMP_MASK 0x0007 /* DSP1DRC_LO_COMP - [2:0] */
+#define WM8996_DSP1DRC_LO_COMP_SHIFT 0 /* DSP1DRC_LO_COMP - [2:0] */
+#define WM8996_DSP1DRC_LO_COMP_WIDTH 3 /* DSP1DRC_LO_COMP - [2:0] */
+
+/*
+ * R1091 (0x443) - DSP1 DRC (4)
+ */
+#define WM8996_DSP1DRC_KNEE_IP_MASK 0x07E0 /* DSP1DRC_KNEE_IP - [10:5] */
+#define WM8996_DSP1DRC_KNEE_IP_SHIFT 5 /* DSP1DRC_KNEE_IP - [10:5] */
+#define WM8996_DSP1DRC_KNEE_IP_WIDTH 6 /* DSP1DRC_KNEE_IP - [10:5] */
+#define WM8996_DSP1DRC_KNEE_OP_MASK 0x001F /* DSP1DRC_KNEE_OP - [4:0] */
+#define WM8996_DSP1DRC_KNEE_OP_SHIFT 0 /* DSP1DRC_KNEE_OP - [4:0] */
+#define WM8996_DSP1DRC_KNEE_OP_WIDTH 5 /* DSP1DRC_KNEE_OP - [4:0] */
+
+/*
+ * R1092 (0x444) - DSP1 DRC (5)
+ */
+#define WM8996_DSP1DRC_KNEE2_IP_MASK 0x03E0 /* DSP1DRC_KNEE2_IP - [9:5] */
+#define WM8996_DSP1DRC_KNEE2_IP_SHIFT 5 /* DSP1DRC_KNEE2_IP - [9:5] */
+#define WM8996_DSP1DRC_KNEE2_IP_WIDTH 5 /* DSP1DRC_KNEE2_IP - [9:5] */
+#define WM8996_DSP1DRC_KNEE2_OP_MASK 0x001F /* DSP1DRC_KNEE2_OP - [4:0] */
+#define WM8996_DSP1DRC_KNEE2_OP_SHIFT 0 /* DSP1DRC_KNEE2_OP - [4:0] */
+#define WM8996_DSP1DRC_KNEE2_OP_WIDTH 5 /* DSP1DRC_KNEE2_OP - [4:0] */
+
+/*
+ * R1152 (0x480) - DSP1 RX EQ Gains (1)
+ */
+#define WM8996_DSP1RX_EQ_B1_GAIN_MASK 0xF800 /* DSP1RX_EQ_B1_GAIN - [15:11] */
+#define WM8996_DSP1RX_EQ_B1_GAIN_SHIFT 11 /* DSP1RX_EQ_B1_GAIN - [15:11] */
+#define WM8996_DSP1RX_EQ_B1_GAIN_WIDTH 5 /* DSP1RX_EQ_B1_GAIN - [15:11] */
+#define WM8996_DSP1RX_EQ_B2_GAIN_MASK 0x07C0 /* DSP1RX_EQ_B2_GAIN - [10:6] */
+#define WM8996_DSP1RX_EQ_B2_GAIN_SHIFT 6 /* DSP1RX_EQ_B2_GAIN - [10:6] */
+#define WM8996_DSP1RX_EQ_B2_GAIN_WIDTH 5 /* DSP1RX_EQ_B2_GAIN - [10:6] */
+#define WM8996_DSP1RX_EQ_B3_GAIN_MASK 0x003E /* DSP1RX_EQ_B3_GAIN - [5:1] */
+#define WM8996_DSP1RX_EQ_B3_GAIN_SHIFT 1 /* DSP1RX_EQ_B3_GAIN - [5:1] */
+#define WM8996_DSP1RX_EQ_B3_GAIN_WIDTH 5 /* DSP1RX_EQ_B3_GAIN - [5:1] */
+#define WM8996_DSP1RX_EQ_ENA 0x0001 /* DSP1RX_EQ_ENA */
+#define WM8996_DSP1RX_EQ_ENA_MASK 0x0001 /* DSP1RX_EQ_ENA */
+#define WM8996_DSP1RX_EQ_ENA_SHIFT 0 /* DSP1RX_EQ_ENA */
+#define WM8996_DSP1RX_EQ_ENA_WIDTH 1 /* DSP1RX_EQ_ENA */
+
+/*
+ * R1153 (0x481) - DSP1 RX EQ Gains (2)
+ */
+#define WM8996_DSP1RX_EQ_B4_GAIN_MASK 0xF800 /* DSP1RX_EQ_B4_GAIN - [15:11] */
+#define WM8996_DSP1RX_EQ_B4_GAIN_SHIFT 11 /* DSP1RX_EQ_B4_GAIN - [15:11] */
+#define WM8996_DSP1RX_EQ_B4_GAIN_WIDTH 5 /* DSP1RX_EQ_B4_GAIN - [15:11] */
+#define WM8996_DSP1RX_EQ_B5_GAIN_MASK 0x07C0 /* DSP1RX_EQ_B5_GAIN - [10:6] */
+#define WM8996_DSP1RX_EQ_B5_GAIN_SHIFT 6 /* DSP1RX_EQ_B5_GAIN - [10:6] */
+#define WM8996_DSP1RX_EQ_B5_GAIN_WIDTH 5 /* DSP1RX_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1154 (0x482) - DSP1 RX EQ Band 1 A
+ */
+#define WM8996_DSP1RX_EQ_B1_A_MASK 0xFFFF /* DSP1RX_EQ_B1_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B1_A_SHIFT 0 /* DSP1RX_EQ_B1_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B1_A_WIDTH 16 /* DSP1RX_EQ_B1_A - [15:0] */
+
+/*
+ * R1155 (0x483) - DSP1 RX EQ Band 1 B
+ */
+#define WM8996_DSP1RX_EQ_B1_B_MASK 0xFFFF /* DSP1RX_EQ_B1_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B1_B_SHIFT 0 /* DSP1RX_EQ_B1_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B1_B_WIDTH 16 /* DSP1RX_EQ_B1_B - [15:0] */
+
+/*
+ * R1156 (0x484) - DSP1 RX EQ Band 1 PG
+ */
+#define WM8996_DSP1RX_EQ_B1_PG_MASK 0xFFFF /* DSP1RX_EQ_B1_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B1_PG_SHIFT 0 /* DSP1RX_EQ_B1_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B1_PG_WIDTH 16 /* DSP1RX_EQ_B1_PG - [15:0] */
+
+/*
+ * R1157 (0x485) - DSP1 RX EQ Band 2 A
+ */
+#define WM8996_DSP1RX_EQ_B2_A_MASK 0xFFFF /* DSP1RX_EQ_B2_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_A_SHIFT 0 /* DSP1RX_EQ_B2_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_A_WIDTH 16 /* DSP1RX_EQ_B2_A - [15:0] */
+
+/*
+ * R1158 (0x486) - DSP1 RX EQ Band 2 B
+ */
+#define WM8996_DSP1RX_EQ_B2_B_MASK 0xFFFF /* DSP1RX_EQ_B2_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_B_SHIFT 0 /* DSP1RX_EQ_B2_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_B_WIDTH 16 /* DSP1RX_EQ_B2_B - [15:0] */
+
+/*
+ * R1159 (0x487) - DSP1 RX EQ Band 2 C
+ */
+#define WM8996_DSP1RX_EQ_B2_C_MASK 0xFFFF /* DSP1RX_EQ_B2_C - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_C_SHIFT 0 /* DSP1RX_EQ_B2_C - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_C_WIDTH 16 /* DSP1RX_EQ_B2_C - [15:0] */
+
+/*
+ * R1160 (0x488) - DSP1 RX EQ Band 2 PG
+ */
+#define WM8996_DSP1RX_EQ_B2_PG_MASK 0xFFFF /* DSP1RX_EQ_B2_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_PG_SHIFT 0 /* DSP1RX_EQ_B2_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B2_PG_WIDTH 16 /* DSP1RX_EQ_B2_PG - [15:0] */
+
+/*
+ * R1161 (0x489) - DSP1 RX EQ Band 3 A
+ */
+#define WM8996_DSP1RX_EQ_B3_A_MASK 0xFFFF /* DSP1RX_EQ_B3_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_A_SHIFT 0 /* DSP1RX_EQ_B3_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_A_WIDTH 16 /* DSP1RX_EQ_B3_A - [15:0] */
+
+/*
+ * R1162 (0x48A) - DSP1 RX EQ Band 3 B
+ */
+#define WM8996_DSP1RX_EQ_B3_B_MASK 0xFFFF /* DSP1RX_EQ_B3_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_B_SHIFT 0 /* DSP1RX_EQ_B3_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_B_WIDTH 16 /* DSP1RX_EQ_B3_B - [15:0] */
+
+/*
+ * R1163 (0x48B) - DSP1 RX EQ Band 3 C
+ */
+#define WM8996_DSP1RX_EQ_B3_C_MASK 0xFFFF /* DSP1RX_EQ_B3_C - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_C_SHIFT 0 /* DSP1RX_EQ_B3_C - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_C_WIDTH 16 /* DSP1RX_EQ_B3_C - [15:0] */
+
+/*
+ * R1164 (0x48C) - DSP1 RX EQ Band 3 PG
+ */
+#define WM8996_DSP1RX_EQ_B3_PG_MASK 0xFFFF /* DSP1RX_EQ_B3_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_PG_SHIFT 0 /* DSP1RX_EQ_B3_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B3_PG_WIDTH 16 /* DSP1RX_EQ_B3_PG - [15:0] */
+
+/*
+ * R1165 (0x48D) - DSP1 RX EQ Band 4 A
+ */
+#define WM8996_DSP1RX_EQ_B4_A_MASK 0xFFFF /* DSP1RX_EQ_B4_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_A_SHIFT 0 /* DSP1RX_EQ_B4_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_A_WIDTH 16 /* DSP1RX_EQ_B4_A - [15:0] */
+
+/*
+ * R1166 (0x48E) - DSP1 RX EQ Band 4 B
+ */
+#define WM8996_DSP1RX_EQ_B4_B_MASK 0xFFFF /* DSP1RX_EQ_B4_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_B_SHIFT 0 /* DSP1RX_EQ_B4_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_B_WIDTH 16 /* DSP1RX_EQ_B4_B - [15:0] */
+
+/*
+ * R1167 (0x48F) - DSP1 RX EQ Band 4 C
+ */
+#define WM8996_DSP1RX_EQ_B4_C_MASK 0xFFFF /* DSP1RX_EQ_B4_C - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_C_SHIFT 0 /* DSP1RX_EQ_B4_C - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_C_WIDTH 16 /* DSP1RX_EQ_B4_C - [15:0] */
+
+/*
+ * R1168 (0x490) - DSP1 RX EQ Band 4 PG
+ */
+#define WM8996_DSP1RX_EQ_B4_PG_MASK 0xFFFF /* DSP1RX_EQ_B4_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_PG_SHIFT 0 /* DSP1RX_EQ_B4_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B4_PG_WIDTH 16 /* DSP1RX_EQ_B4_PG - [15:0] */
+
+/*
+ * R1169 (0x491) - DSP1 RX EQ Band 5 A
+ */
+#define WM8996_DSP1RX_EQ_B5_A_MASK 0xFFFF /* DSP1RX_EQ_B5_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B5_A_SHIFT 0 /* DSP1RX_EQ_B5_A - [15:0] */
+#define WM8996_DSP1RX_EQ_B5_A_WIDTH 16 /* DSP1RX_EQ_B5_A - [15:0] */
+
+/*
+ * R1170 (0x492) - DSP1 RX EQ Band 5 B
+ */
+#define WM8996_DSP1RX_EQ_B5_B_MASK 0xFFFF /* DSP1RX_EQ_B5_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B5_B_SHIFT 0 /* DSP1RX_EQ_B5_B - [15:0] */
+#define WM8996_DSP1RX_EQ_B5_B_WIDTH 16 /* DSP1RX_EQ_B5_B - [15:0] */
+
+/*
+ * R1171 (0x493) - DSP1 RX EQ Band 5 PG
+ */
+#define WM8996_DSP1RX_EQ_B5_PG_MASK 0xFFFF /* DSP1RX_EQ_B5_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B5_PG_SHIFT 0 /* DSP1RX_EQ_B5_PG - [15:0] */
+#define WM8996_DSP1RX_EQ_B5_PG_WIDTH 16 /* DSP1RX_EQ_B5_PG - [15:0] */
+
+/*
+ * R1280 (0x500) - DSP2 TX Left Volume
+ */
+#define WM8996_DSP2TX_VU 0x0100 /* DSP2TX_VU */
+#define WM8996_DSP2TX_VU_MASK 0x0100 /* DSP2TX_VU */
+#define WM8996_DSP2TX_VU_SHIFT 8 /* DSP2TX_VU */
+#define WM8996_DSP2TX_VU_WIDTH 1 /* DSP2TX_VU */
+#define WM8996_DSP2TXL_VOL_MASK 0x00FF /* DSP2TXL_VOL - [7:0] */
+#define WM8996_DSP2TXL_VOL_SHIFT 0 /* DSP2TXL_VOL - [7:0] */
+#define WM8996_DSP2TXL_VOL_WIDTH 8 /* DSP2TXL_VOL - [7:0] */
+
+/*
+ * R1281 (0x501) - DSP2 TX Right Volume
+ */
+#define WM8996_DSP2TX_VU 0x0100 /* DSP2TX_VU */
+#define WM8996_DSP2TX_VU_MASK 0x0100 /* DSP2TX_VU */
+#define WM8996_DSP2TX_VU_SHIFT 8 /* DSP2TX_VU */
+#define WM8996_DSP2TX_VU_WIDTH 1 /* DSP2TX_VU */
+#define WM8996_DSP2TXR_VOL_MASK 0x00FF /* DSP2TXR_VOL - [7:0] */
+#define WM8996_DSP2TXR_VOL_SHIFT 0 /* DSP2TXR_VOL - [7:0] */
+#define WM8996_DSP2TXR_VOL_WIDTH 8 /* DSP2TXR_VOL - [7:0] */
+
+/*
+ * R1282 (0x502) - DSP2 RX Left Volume
+ */
+#define WM8996_DSP2RX_VU 0x0100 /* DSP2RX_VU */
+#define WM8996_DSP2RX_VU_MASK 0x0100 /* DSP2RX_VU */
+#define WM8996_DSP2RX_VU_SHIFT 8 /* DSP2RX_VU */
+#define WM8996_DSP2RX_VU_WIDTH 1 /* DSP2RX_VU */
+#define WM8996_DSP2RXL_VOL_MASK 0x00FF /* DSP2RXL_VOL - [7:0] */
+#define WM8996_DSP2RXL_VOL_SHIFT 0 /* DSP2RXL_VOL - [7:0] */
+#define WM8996_DSP2RXL_VOL_WIDTH 8 /* DSP2RXL_VOL - [7:0] */
+
+/*
+ * R1283 (0x503) - DSP2 RX Right Volume
+ */
+#define WM8996_DSP2RX_VU 0x0100 /* DSP2RX_VU */
+#define WM8996_DSP2RX_VU_MASK 0x0100 /* DSP2RX_VU */
+#define WM8996_DSP2RX_VU_SHIFT 8 /* DSP2RX_VU */
+#define WM8996_DSP2RX_VU_WIDTH 1 /* DSP2RX_VU */
+#define WM8996_DSP2RXR_VOL_MASK 0x00FF /* DSP2RXR_VOL - [7:0] */
+#define WM8996_DSP2RXR_VOL_SHIFT 0 /* DSP2RXR_VOL - [7:0] */
+#define WM8996_DSP2RXR_VOL_WIDTH 8 /* DSP2RXR_VOL - [7:0] */
+
+/*
+ * R1296 (0x510) - DSP2 TX Filters
+ */
+#define WM8996_DSP2TX_NF 0x2000 /* DSP2TX_NF */
+#define WM8996_DSP2TX_NF_MASK 0x2000 /* DSP2TX_NF */
+#define WM8996_DSP2TX_NF_SHIFT 13 /* DSP2TX_NF */
+#define WM8996_DSP2TX_NF_WIDTH 1 /* DSP2TX_NF */
+#define WM8996_DSP2TXL_HPF 0x1000 /* DSP2TXL_HPF */
+#define WM8996_DSP2TXL_HPF_MASK 0x1000 /* DSP2TXL_HPF */
+#define WM8996_DSP2TXL_HPF_SHIFT 12 /* DSP2TXL_HPF */
+#define WM8996_DSP2TXL_HPF_WIDTH 1 /* DSP2TXL_HPF */
+#define WM8996_DSP2TXR_HPF 0x0800 /* DSP2TXR_HPF */
+#define WM8996_DSP2TXR_HPF_MASK 0x0800 /* DSP2TXR_HPF */
+#define WM8996_DSP2TXR_HPF_SHIFT 11 /* DSP2TXR_HPF */
+#define WM8996_DSP2TXR_HPF_WIDTH 1 /* DSP2TXR_HPF */
+#define WM8996_DSP2TX_HPF_MODE_MASK 0x0018 /* DSP2TX_HPF_MODE - [4:3] */
+#define WM8996_DSP2TX_HPF_MODE_SHIFT 3 /* DSP2TX_HPF_MODE - [4:3] */
+#define WM8996_DSP2TX_HPF_MODE_WIDTH 2 /* DSP2TX_HPF_MODE - [4:3] */
+#define WM8996_DSP2TX_HPF_CUT_MASK 0x0007 /* DSP2TX_HPF_CUT - [2:0] */
+#define WM8996_DSP2TX_HPF_CUT_SHIFT 0 /* DSP2TX_HPF_CUT - [2:0] */
+#define WM8996_DSP2TX_HPF_CUT_WIDTH 3 /* DSP2TX_HPF_CUT - [2:0] */
+
+/*
+ * R1312 (0x520) - DSP2 RX Filters (1)
+ */
+#define WM8996_DSP2RX_MUTE 0x0200 /* DSP2RX_MUTE */
+#define WM8996_DSP2RX_MUTE_MASK 0x0200 /* DSP2RX_MUTE */
+#define WM8996_DSP2RX_MUTE_SHIFT 9 /* DSP2RX_MUTE */
+#define WM8996_DSP2RX_MUTE_WIDTH 1 /* DSP2RX_MUTE */
+#define WM8996_DSP2RX_MONO 0x0080 /* DSP2RX_MONO */
+#define WM8996_DSP2RX_MONO_MASK 0x0080 /* DSP2RX_MONO */
+#define WM8996_DSP2RX_MONO_SHIFT 7 /* DSP2RX_MONO */
+#define WM8996_DSP2RX_MONO_WIDTH 1 /* DSP2RX_MONO */
+#define WM8996_DSP2RX_MUTERATE 0x0020 /* DSP2RX_MUTERATE */
+#define WM8996_DSP2RX_MUTERATE_MASK 0x0020 /* DSP2RX_MUTERATE */
+#define WM8996_DSP2RX_MUTERATE_SHIFT 5 /* DSP2RX_MUTERATE */
+#define WM8996_DSP2RX_MUTERATE_WIDTH 1 /* DSP2RX_MUTERATE */
+#define WM8996_DSP2RX_UNMUTE_RAMP 0x0010 /* DSP2RX_UNMUTE_RAMP */
+#define WM8996_DSP2RX_UNMUTE_RAMP_MASK 0x0010 /* DSP2RX_UNMUTE_RAMP */
+#define WM8996_DSP2RX_UNMUTE_RAMP_SHIFT 4 /* DSP2RX_UNMUTE_RAMP */
+#define WM8996_DSP2RX_UNMUTE_RAMP_WIDTH 1 /* DSP2RX_UNMUTE_RAMP */
+
+/*
+ * R1313 (0x521) - DSP2 RX Filters (2)
+ */
+#define WM8996_DSP2RX_3D_GAIN_MASK 0x3E00 /* DSP2RX_3D_GAIN - [13:9] */
+#define WM8996_DSP2RX_3D_GAIN_SHIFT 9 /* DSP2RX_3D_GAIN - [13:9] */
+#define WM8996_DSP2RX_3D_GAIN_WIDTH 5 /* DSP2RX_3D_GAIN - [13:9] */
+#define WM8996_DSP2RX_3D_ENA 0x0100 /* DSP2RX_3D_ENA */
+#define WM8996_DSP2RX_3D_ENA_MASK 0x0100 /* DSP2RX_3D_ENA */
+#define WM8996_DSP2RX_3D_ENA_SHIFT 8 /* DSP2RX_3D_ENA */
+#define WM8996_DSP2RX_3D_ENA_WIDTH 1 /* DSP2RX_3D_ENA */
+
+/*
+ * R1344 (0x540) - DSP2 DRC (1)
+ */
+#define WM8996_DSP2DRC_SIG_DET_RMS_MASK 0xF800 /* DSP2DRC_SIG_DET_RMS - [15:11] */
+#define WM8996_DSP2DRC_SIG_DET_RMS_SHIFT 11 /* DSP2DRC_SIG_DET_RMS - [15:11] */
+#define WM8996_DSP2DRC_SIG_DET_RMS_WIDTH 5 /* DSP2DRC_SIG_DET_RMS - [15:11] */
+#define WM8996_DSP2DRC_SIG_DET_PK_MASK 0x0600 /* DSP2DRC_SIG_DET_PK - [10:9] */
+#define WM8996_DSP2DRC_SIG_DET_PK_SHIFT 9 /* DSP2DRC_SIG_DET_PK - [10:9] */
+#define WM8996_DSP2DRC_SIG_DET_PK_WIDTH 2 /* DSP2DRC_SIG_DET_PK - [10:9] */
+#define WM8996_DSP2DRC_NG_ENA 0x0100 /* DSP2DRC_NG_ENA */
+#define WM8996_DSP2DRC_NG_ENA_MASK 0x0100 /* DSP2DRC_NG_ENA */
+#define WM8996_DSP2DRC_NG_ENA_SHIFT 8 /* DSP2DRC_NG_ENA */
+#define WM8996_DSP2DRC_NG_ENA_WIDTH 1 /* DSP2DRC_NG_ENA */
+#define WM8996_DSP2DRC_SIG_DET_MODE 0x0080 /* DSP2DRC_SIG_DET_MODE */
+#define WM8996_DSP2DRC_SIG_DET_MODE_MASK 0x0080 /* DSP2DRC_SIG_DET_MODE */
+#define WM8996_DSP2DRC_SIG_DET_MODE_SHIFT 7 /* DSP2DRC_SIG_DET_MODE */
+#define WM8996_DSP2DRC_SIG_DET_MODE_WIDTH 1 /* DSP2DRC_SIG_DET_MODE */
+#define WM8996_DSP2DRC_SIG_DET 0x0040 /* DSP2DRC_SIG_DET */
+#define WM8996_DSP2DRC_SIG_DET_MASK 0x0040 /* DSP2DRC_SIG_DET */
+#define WM8996_DSP2DRC_SIG_DET_SHIFT 6 /* DSP2DRC_SIG_DET */
+#define WM8996_DSP2DRC_SIG_DET_WIDTH 1 /* DSP2DRC_SIG_DET */
+#define WM8996_DSP2DRC_KNEE2_OP_ENA 0x0020 /* DSP2DRC_KNEE2_OP_ENA */
+#define WM8996_DSP2DRC_KNEE2_OP_ENA_MASK 0x0020 /* DSP2DRC_KNEE2_OP_ENA */
+#define WM8996_DSP2DRC_KNEE2_OP_ENA_SHIFT 5 /* DSP2DRC_KNEE2_OP_ENA */
+#define WM8996_DSP2DRC_KNEE2_OP_ENA_WIDTH 1 /* DSP2DRC_KNEE2_OP_ENA */
+#define WM8996_DSP2DRC_QR 0x0010 /* DSP2DRC_QR */
+#define WM8996_DSP2DRC_QR_MASK 0x0010 /* DSP2DRC_QR */
+#define WM8996_DSP2DRC_QR_SHIFT 4 /* DSP2DRC_QR */
+#define WM8996_DSP2DRC_QR_WIDTH 1 /* DSP2DRC_QR */
+#define WM8996_DSP2DRC_ANTICLIP 0x0008 /* DSP2DRC_ANTICLIP */
+#define WM8996_DSP2DRC_ANTICLIP_MASK 0x0008 /* DSP2DRC_ANTICLIP */
+#define WM8996_DSP2DRC_ANTICLIP_SHIFT 3 /* DSP2DRC_ANTICLIP */
+#define WM8996_DSP2DRC_ANTICLIP_WIDTH 1 /* DSP2DRC_ANTICLIP */
+#define WM8996_DSP2RX_DRC_ENA 0x0004 /* DSP2RX_DRC_ENA */
+#define WM8996_DSP2RX_DRC_ENA_MASK 0x0004 /* DSP2RX_DRC_ENA */
+#define WM8996_DSP2RX_DRC_ENA_SHIFT 2 /* DSP2RX_DRC_ENA */
+#define WM8996_DSP2RX_DRC_ENA_WIDTH 1 /* DSP2RX_DRC_ENA */
+#define WM8996_DSP2TXL_DRC_ENA 0x0002 /* DSP2TXL_DRC_ENA */
+#define WM8996_DSP2TXL_DRC_ENA_MASK 0x0002 /* DSP2TXL_DRC_ENA */
+#define WM8996_DSP2TXL_DRC_ENA_SHIFT 1 /* DSP2TXL_DRC_ENA */
+#define WM8996_DSP2TXL_DRC_ENA_WIDTH 1 /* DSP2TXL_DRC_ENA */
+#define WM8996_DSP2TXR_DRC_ENA 0x0001 /* DSP2TXR_DRC_ENA */
+#define WM8996_DSP2TXR_DRC_ENA_MASK 0x0001 /* DSP2TXR_DRC_ENA */
+#define WM8996_DSP2TXR_DRC_ENA_SHIFT 0 /* DSP2TXR_DRC_ENA */
+#define WM8996_DSP2TXR_DRC_ENA_WIDTH 1 /* DSP2TXR_DRC_ENA */
+
+/*
+ * R1345 (0x541) - DSP2 DRC (2)
+ */
+#define WM8996_DSP2DRC_ATK_MASK 0x1E00 /* DSP2DRC_ATK - [12:9] */
+#define WM8996_DSP2DRC_ATK_SHIFT 9 /* DSP2DRC_ATK - [12:9] */
+#define WM8996_DSP2DRC_ATK_WIDTH 4 /* DSP2DRC_ATK - [12:9] */
+#define WM8996_DSP2DRC_DCY_MASK 0x01E0 /* DSP2DRC_DCY - [8:5] */
+#define WM8996_DSP2DRC_DCY_SHIFT 5 /* DSP2DRC_DCY - [8:5] */
+#define WM8996_DSP2DRC_DCY_WIDTH 4 /* DSP2DRC_DCY - [8:5] */
+#define WM8996_DSP2DRC_MINGAIN_MASK 0x001C /* DSP2DRC_MINGAIN - [4:2] */
+#define WM8996_DSP2DRC_MINGAIN_SHIFT 2 /* DSP2DRC_MINGAIN - [4:2] */
+#define WM8996_DSP2DRC_MINGAIN_WIDTH 3 /* DSP2DRC_MINGAIN - [4:2] */
+#define WM8996_DSP2DRC_MAXGAIN_MASK 0x0003 /* DSP2DRC_MAXGAIN - [1:0] */
+#define WM8996_DSP2DRC_MAXGAIN_SHIFT 0 /* DSP2DRC_MAXGAIN - [1:0] */
+#define WM8996_DSP2DRC_MAXGAIN_WIDTH 2 /* DSP2DRC_MAXGAIN - [1:0] */
+
+/*
+ * R1346 (0x542) - DSP2 DRC (3)
+ */
+#define WM8996_DSP2DRC_NG_MINGAIN_MASK 0xF000 /* DSP2DRC_NG_MINGAIN - [15:12] */
+#define WM8996_DSP2DRC_NG_MINGAIN_SHIFT 12 /* DSP2DRC_NG_MINGAIN - [15:12] */
+#define WM8996_DSP2DRC_NG_MINGAIN_WIDTH 4 /* DSP2DRC_NG_MINGAIN - [15:12] */
+#define WM8996_DSP2DRC_NG_EXP_MASK 0x0C00 /* DSP2DRC_NG_EXP - [11:10] */
+#define WM8996_DSP2DRC_NG_EXP_SHIFT 10 /* DSP2DRC_NG_EXP - [11:10] */
+#define WM8996_DSP2DRC_NG_EXP_WIDTH 2 /* DSP2DRC_NG_EXP - [11:10] */
+#define WM8996_DSP2DRC_QR_THR_MASK 0x0300 /* DSP2DRC_QR_THR - [9:8] */
+#define WM8996_DSP2DRC_QR_THR_SHIFT 8 /* DSP2DRC_QR_THR - [9:8] */
+#define WM8996_DSP2DRC_QR_THR_WIDTH 2 /* DSP2DRC_QR_THR - [9:8] */
+#define WM8996_DSP2DRC_QR_DCY_MASK 0x00C0 /* DSP2DRC_QR_DCY - [7:6] */
+#define WM8996_DSP2DRC_QR_DCY_SHIFT 6 /* DSP2DRC_QR_DCY - [7:6] */
+#define WM8996_DSP2DRC_QR_DCY_WIDTH 2 /* DSP2DRC_QR_DCY - [7:6] */
+#define WM8996_DSP2DRC_HI_COMP_MASK 0x0038 /* DSP2DRC_HI_COMP - [5:3] */
+#define WM8996_DSP2DRC_HI_COMP_SHIFT 3 /* DSP2DRC_HI_COMP - [5:3] */
+#define WM8996_DSP2DRC_HI_COMP_WIDTH 3 /* DSP2DRC_HI_COMP - [5:3] */
+#define WM8996_DSP2DRC_LO_COMP_MASK 0x0007 /* DSP2DRC_LO_COMP - [2:0] */
+#define WM8996_DSP2DRC_LO_COMP_SHIFT 0 /* DSP2DRC_LO_COMP - [2:0] */
+#define WM8996_DSP2DRC_LO_COMP_WIDTH 3 /* DSP2DRC_LO_COMP - [2:0] */
+
+/*
+ * R1347 (0x543) - DSP2 DRC (4)
+ */
+#define WM8996_DSP2DRC_KNEE_IP_MASK 0x07E0 /* DSP2DRC_KNEE_IP - [10:5] */
+#define WM8996_DSP2DRC_KNEE_IP_SHIFT 5 /* DSP2DRC_KNEE_IP - [10:5] */
+#define WM8996_DSP2DRC_KNEE_IP_WIDTH 6 /* DSP2DRC_KNEE_IP - [10:5] */
+#define WM8996_DSP2DRC_KNEE_OP_MASK 0x001F /* DSP2DRC_KNEE_OP - [4:0] */
+#define WM8996_DSP2DRC_KNEE_OP_SHIFT 0 /* DSP2DRC_KNEE_OP - [4:0] */
+#define WM8996_DSP2DRC_KNEE_OP_WIDTH 5 /* DSP2DRC_KNEE_OP - [4:0] */
+
+/*
+ * R1348 (0x544) - DSP2 DRC (5)
+ */
+#define WM8996_DSP2DRC_KNEE2_IP_MASK 0x03E0 /* DSP2DRC_KNEE2_IP - [9:5] */
+#define WM8996_DSP2DRC_KNEE2_IP_SHIFT 5 /* DSP2DRC_KNEE2_IP - [9:5] */
+#define WM8996_DSP2DRC_KNEE2_IP_WIDTH 5 /* DSP2DRC_KNEE2_IP - [9:5] */
+#define WM8996_DSP2DRC_KNEE2_OP_MASK 0x001F /* DSP2DRC_KNEE2_OP - [4:0] */
+#define WM8996_DSP2DRC_KNEE2_OP_SHIFT 0 /* DSP2DRC_KNEE2_OP - [4:0] */
+#define WM8996_DSP2DRC_KNEE2_OP_WIDTH 5 /* DSP2DRC_KNEE2_OP - [4:0] */
+
+/*
+ * R1408 (0x580) - DSP2 RX EQ Gains (1)
+ */
+#define WM8996_DSP2RX_EQ_B1_GAIN_MASK 0xF800 /* DSP2RX_EQ_B1_GAIN - [15:11] */
+#define WM8996_DSP2RX_EQ_B1_GAIN_SHIFT 11 /* DSP2RX_EQ_B1_GAIN - [15:11] */
+#define WM8996_DSP2RX_EQ_B1_GAIN_WIDTH 5 /* DSP2RX_EQ_B1_GAIN - [15:11] */
+#define WM8996_DSP2RX_EQ_B2_GAIN_MASK 0x07C0 /* DSP2RX_EQ_B2_GAIN - [10:6] */
+#define WM8996_DSP2RX_EQ_B2_GAIN_SHIFT 6 /* DSP2RX_EQ_B2_GAIN - [10:6] */
+#define WM8996_DSP2RX_EQ_B2_GAIN_WIDTH 5 /* DSP2RX_EQ_B2_GAIN - [10:6] */
+#define WM8996_DSP2RX_EQ_B3_GAIN_MASK 0x003E /* DSP2RX_EQ_B3_GAIN - [5:1] */
+#define WM8996_DSP2RX_EQ_B3_GAIN_SHIFT 1 /* DSP2RX_EQ_B3_GAIN - [5:1] */
+#define WM8996_DSP2RX_EQ_B3_GAIN_WIDTH 5 /* DSP2RX_EQ_B3_GAIN - [5:1] */
+#define WM8996_DSP2RX_EQ_ENA 0x0001 /* DSP2RX_EQ_ENA */
+#define WM8996_DSP2RX_EQ_ENA_MASK 0x0001 /* DSP2RX_EQ_ENA */
+#define WM8996_DSP2RX_EQ_ENA_SHIFT 0 /* DSP2RX_EQ_ENA */
+#define WM8996_DSP2RX_EQ_ENA_WIDTH 1 /* DSP2RX_EQ_ENA */
+
+/*
+ * R1409 (0x581) - DSP2 RX EQ Gains (2)
+ */
+#define WM8996_DSP2RX_EQ_B4_GAIN_MASK 0xF800 /* DSP2RX_EQ_B4_GAIN - [15:11] */
+#define WM8996_DSP2RX_EQ_B4_GAIN_SHIFT 11 /* DSP2RX_EQ_B4_GAIN - [15:11] */
+#define WM8996_DSP2RX_EQ_B4_GAIN_WIDTH 5 /* DSP2RX_EQ_B4_GAIN - [15:11] */
+#define WM8996_DSP2RX_EQ_B5_GAIN_MASK 0x07C0 /* DSP2RX_EQ_B5_GAIN - [10:6] */
+#define WM8996_DSP2RX_EQ_B5_GAIN_SHIFT 6 /* DSP2RX_EQ_B5_GAIN - [10:6] */
+#define WM8996_DSP2RX_EQ_B5_GAIN_WIDTH 5 /* DSP2RX_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1410 (0x582) - DSP2 RX EQ Band 1 A
+ */
+#define WM8996_DSP2RX_EQ_B1_A_MASK 0xFFFF /* DSP2RX_EQ_B1_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B1_A_SHIFT 0 /* DSP2RX_EQ_B1_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B1_A_WIDTH 16 /* DSP2RX_EQ_B1_A - [15:0] */
+
+/*
+ * R1411 (0x583) - DSP2 RX EQ Band 1 B
+ */
+#define WM8996_DSP2RX_EQ_B1_B_MASK 0xFFFF /* DSP2RX_EQ_B1_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B1_B_SHIFT 0 /* DSP2RX_EQ_B1_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B1_B_WIDTH 16 /* DSP2RX_EQ_B1_B - [15:0] */
+
+/*
+ * R1412 (0x584) - DSP2 RX EQ Band 1 PG
+ */
+#define WM8996_DSP2RX_EQ_B1_PG_MASK 0xFFFF /* DSP2RX_EQ_B1_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B1_PG_SHIFT 0 /* DSP2RX_EQ_B1_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B1_PG_WIDTH 16 /* DSP2RX_EQ_B1_PG - [15:0] */
+
+/*
+ * R1413 (0x585) - DSP2 RX EQ Band 2 A
+ */
+#define WM8996_DSP2RX_EQ_B2_A_MASK 0xFFFF /* DSP2RX_EQ_B2_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_A_SHIFT 0 /* DSP2RX_EQ_B2_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_A_WIDTH 16 /* DSP2RX_EQ_B2_A - [15:0] */
+
+/*
+ * R1414 (0x586) - DSP2 RX EQ Band 2 B
+ */
+#define WM8996_DSP2RX_EQ_B2_B_MASK 0xFFFF /* DSP2RX_EQ_B2_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_B_SHIFT 0 /* DSP2RX_EQ_B2_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_B_WIDTH 16 /* DSP2RX_EQ_B2_B - [15:0] */
+
+/*
+ * R1415 (0x587) - DSP2 RX EQ Band 2 C
+ */
+#define WM8996_DSP2RX_EQ_B2_C_MASK 0xFFFF /* DSP2RX_EQ_B2_C - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_C_SHIFT 0 /* DSP2RX_EQ_B2_C - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_C_WIDTH 16 /* DSP2RX_EQ_B2_C - [15:0] */
+
+/*
+ * R1416 (0x588) - DSP2 RX EQ Band 2 PG
+ */
+#define WM8996_DSP2RX_EQ_B2_PG_MASK 0xFFFF /* DSP2RX_EQ_B2_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_PG_SHIFT 0 /* DSP2RX_EQ_B2_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B2_PG_WIDTH 16 /* DSP2RX_EQ_B2_PG - [15:0] */
+
+/*
+ * R1417 (0x589) - DSP2 RX EQ Band 3 A
+ */
+#define WM8996_DSP2RX_EQ_B3_A_MASK 0xFFFF /* DSP2RX_EQ_B3_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_A_SHIFT 0 /* DSP2RX_EQ_B3_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_A_WIDTH 16 /* DSP2RX_EQ_B3_A - [15:0] */
+
+/*
+ * R1418 (0x58A) - DSP2 RX EQ Band 3 B
+ */
+#define WM8996_DSP2RX_EQ_B3_B_MASK 0xFFFF /* DSP2RX_EQ_B3_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_B_SHIFT 0 /* DSP2RX_EQ_B3_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_B_WIDTH 16 /* DSP2RX_EQ_B3_B - [15:0] */
+
+/*
+ * R1419 (0x58B) - DSP2 RX EQ Band 3 C
+ */
+#define WM8996_DSP2RX_EQ_B3_C_MASK 0xFFFF /* DSP2RX_EQ_B3_C - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_C_SHIFT 0 /* DSP2RX_EQ_B3_C - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_C_WIDTH 16 /* DSP2RX_EQ_B3_C - [15:0] */
+
+/*
+ * R1420 (0x58C) - DSP2 RX EQ Band 3 PG
+ */
+#define WM8996_DSP2RX_EQ_B3_PG_MASK 0xFFFF /* DSP2RX_EQ_B3_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_PG_SHIFT 0 /* DSP2RX_EQ_B3_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B3_PG_WIDTH 16 /* DSP2RX_EQ_B3_PG - [15:0] */
+
+/*
+ * R1421 (0x58D) - DSP2 RX EQ Band 4 A
+ */
+#define WM8996_DSP2RX_EQ_B4_A_MASK 0xFFFF /* DSP2RX_EQ_B4_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_A_SHIFT 0 /* DSP2RX_EQ_B4_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_A_WIDTH 16 /* DSP2RX_EQ_B4_A - [15:0] */
+
+/*
+ * R1422 (0x58E) - DSP2 RX EQ Band 4 B
+ */
+#define WM8996_DSP2RX_EQ_B4_B_MASK 0xFFFF /* DSP2RX_EQ_B4_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_B_SHIFT 0 /* DSP2RX_EQ_B4_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_B_WIDTH 16 /* DSP2RX_EQ_B4_B - [15:0] */
+
+/*
+ * R1423 (0x58F) - DSP2 RX EQ Band 4 C
+ */
+#define WM8996_DSP2RX_EQ_B4_C_MASK 0xFFFF /* DSP2RX_EQ_B4_C - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_C_SHIFT 0 /* DSP2RX_EQ_B4_C - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_C_WIDTH 16 /* DSP2RX_EQ_B4_C - [15:0] */
+
+/*
+ * R1424 (0x590) - DSP2 RX EQ Band 4 PG
+ */
+#define WM8996_DSP2RX_EQ_B4_PG_MASK 0xFFFF /* DSP2RX_EQ_B4_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_PG_SHIFT 0 /* DSP2RX_EQ_B4_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B4_PG_WIDTH 16 /* DSP2RX_EQ_B4_PG - [15:0] */
+
+/*
+ * R1425 (0x591) - DSP2 RX EQ Band 5 A
+ */
+#define WM8996_DSP2RX_EQ_B5_A_MASK 0xFFFF /* DSP2RX_EQ_B5_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B5_A_SHIFT 0 /* DSP2RX_EQ_B5_A - [15:0] */
+#define WM8996_DSP2RX_EQ_B5_A_WIDTH 16 /* DSP2RX_EQ_B5_A - [15:0] */
+
+/*
+ * R1426 (0x592) - DSP2 RX EQ Band 5 B
+ */
+#define WM8996_DSP2RX_EQ_B5_B_MASK 0xFFFF /* DSP2RX_EQ_B5_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B5_B_SHIFT 0 /* DSP2RX_EQ_B5_B - [15:0] */
+#define WM8996_DSP2RX_EQ_B5_B_WIDTH 16 /* DSP2RX_EQ_B5_B - [15:0] */
+
+/*
+ * R1427 (0x593) - DSP2 RX EQ Band 5 PG
+ */
+#define WM8996_DSP2RX_EQ_B5_PG_MASK 0xFFFF /* DSP2RX_EQ_B5_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B5_PG_SHIFT 0 /* DSP2RX_EQ_B5_PG - [15:0] */
+#define WM8996_DSP2RX_EQ_B5_PG_WIDTH 16 /* DSP2RX_EQ_B5_PG - [15:0] */
+
+/*
+ * R1536 (0x600) - DAC1 Mixer Volumes
+ */
+#define WM8996_ADCR_DAC1_VOL_MASK 0x03E0 /* ADCR_DAC1_VOL - [9:5] */
+#define WM8996_ADCR_DAC1_VOL_SHIFT 5 /* ADCR_DAC1_VOL - [9:5] */
+#define WM8996_ADCR_DAC1_VOL_WIDTH 5 /* ADCR_DAC1_VOL - [9:5] */
+#define WM8996_ADCL_DAC1_VOL_MASK 0x001F /* ADCL_DAC1_VOL - [4:0] */
+#define WM8996_ADCL_DAC1_VOL_SHIFT 0 /* ADCL_DAC1_VOL - [4:0] */
+#define WM8996_ADCL_DAC1_VOL_WIDTH 5 /* ADCL_DAC1_VOL - [4:0] */
+
+/*
+ * R1537 (0x601) - DAC1 Left Mixer Routing
+ */
+#define WM8996_ADCR_TO_DAC1L 0x0020 /* ADCR_TO_DAC1L */
+#define WM8996_ADCR_TO_DAC1L_MASK 0x0020 /* ADCR_TO_DAC1L */
+#define WM8996_ADCR_TO_DAC1L_SHIFT 5 /* ADCR_TO_DAC1L */
+#define WM8996_ADCR_TO_DAC1L_WIDTH 1 /* ADCR_TO_DAC1L */
+#define WM8996_ADCL_TO_DAC1L 0x0010 /* ADCL_TO_DAC1L */
+#define WM8996_ADCL_TO_DAC1L_MASK 0x0010 /* ADCL_TO_DAC1L */
+#define WM8996_ADCL_TO_DAC1L_SHIFT 4 /* ADCL_TO_DAC1L */
+#define WM8996_ADCL_TO_DAC1L_WIDTH 1 /* ADCL_TO_DAC1L */
+#define WM8996_DSP2RXL_TO_DAC1L 0x0002 /* DSP2RXL_TO_DAC1L */
+#define WM8996_DSP2RXL_TO_DAC1L_MASK 0x0002 /* DSP2RXL_TO_DAC1L */
+#define WM8996_DSP2RXL_TO_DAC1L_SHIFT 1 /* DSP2RXL_TO_DAC1L */
+#define WM8996_DSP2RXL_TO_DAC1L_WIDTH 1 /* DSP2RXL_TO_DAC1L */
+#define WM8996_DSP1RXL_TO_DAC1L 0x0001 /* DSP1RXL_TO_DAC1L */
+#define WM8996_DSP1RXL_TO_DAC1L_MASK 0x0001 /* DSP1RXL_TO_DAC1L */
+#define WM8996_DSP1RXL_TO_DAC1L_SHIFT 0 /* DSP1RXL_TO_DAC1L */
+#define WM8996_DSP1RXL_TO_DAC1L_WIDTH 1 /* DSP1RXL_TO_DAC1L */
+
+/*
+ * R1538 (0x602) - DAC1 Right Mixer Routing
+ */
+#define WM8996_ADCR_TO_DAC1R 0x0020 /* ADCR_TO_DAC1R */
+#define WM8996_ADCR_TO_DAC1R_MASK 0x0020 /* ADCR_TO_DAC1R */
+#define WM8996_ADCR_TO_DAC1R_SHIFT 5 /* ADCR_TO_DAC1R */
+#define WM8996_ADCR_TO_DAC1R_WIDTH 1 /* ADCR_TO_DAC1R */
+#define WM8996_ADCL_TO_DAC1R 0x0010 /* ADCL_TO_DAC1R */
+#define WM8996_ADCL_TO_DAC1R_MASK 0x0010 /* ADCL_TO_DAC1R */
+#define WM8996_ADCL_TO_DAC1R_SHIFT 4 /* ADCL_TO_DAC1R */
+#define WM8996_ADCL_TO_DAC1R_WIDTH 1 /* ADCL_TO_DAC1R */
+#define WM8996_DSP2RXR_TO_DAC1R 0x0002 /* DSP2RXR_TO_DAC1R */
+#define WM8996_DSP2RXR_TO_DAC1R_MASK 0x0002 /* DSP2RXR_TO_DAC1R */
+#define WM8996_DSP2RXR_TO_DAC1R_SHIFT 1 /* DSP2RXR_TO_DAC1R */
+#define WM8996_DSP2RXR_TO_DAC1R_WIDTH 1 /* DSP2RXR_TO_DAC1R */
+#define WM8996_DSP1RXR_TO_DAC1R 0x0001 /* DSP1RXR_TO_DAC1R */
+#define WM8996_DSP1RXR_TO_DAC1R_MASK 0x0001 /* DSP1RXR_TO_DAC1R */
+#define WM8996_DSP1RXR_TO_DAC1R_SHIFT 0 /* DSP1RXR_TO_DAC1R */
+#define WM8996_DSP1RXR_TO_DAC1R_WIDTH 1 /* DSP1RXR_TO_DAC1R */
+
+/*
+ * R1539 (0x603) - DAC2 Mixer Volumes
+ */
+#define WM8996_ADCR_DAC2_VOL_MASK 0x03E0 /* ADCR_DAC2_VOL - [9:5] */
+#define WM8996_ADCR_DAC2_VOL_SHIFT 5 /* ADCR_DAC2_VOL - [9:5] */
+#define WM8996_ADCR_DAC2_VOL_WIDTH 5 /* ADCR_DAC2_VOL - [9:5] */
+#define WM8996_ADCL_DAC2_VOL_MASK 0x001F /* ADCL_DAC2_VOL - [4:0] */
+#define WM8996_ADCL_DAC2_VOL_SHIFT 0 /* ADCL_DAC2_VOL - [4:0] */
+#define WM8996_ADCL_DAC2_VOL_WIDTH 5 /* ADCL_DAC2_VOL - [4:0] */
+
+/*
+ * R1540 (0x604) - DAC2 Left Mixer Routing
+ */
+#define WM8996_ADCR_TO_DAC2L 0x0020 /* ADCR_TO_DAC2L */
+#define WM8996_ADCR_TO_DAC2L_MASK 0x0020 /* ADCR_TO_DAC2L */
+#define WM8996_ADCR_TO_DAC2L_SHIFT 5 /* ADCR_TO_DAC2L */
+#define WM8996_ADCR_TO_DAC2L_WIDTH 1 /* ADCR_TO_DAC2L */
+#define WM8996_ADCL_TO_DAC2L 0x0010 /* ADCL_TO_DAC2L */
+#define WM8996_ADCL_TO_DAC2L_MASK 0x0010 /* ADCL_TO_DAC2L */
+#define WM8996_ADCL_TO_DAC2L_SHIFT 4 /* ADCL_TO_DAC2L */
+#define WM8996_ADCL_TO_DAC2L_WIDTH 1 /* ADCL_TO_DAC2L */
+#define WM8996_DSP2RXL_TO_DAC2L 0x0002 /* DSP2RXL_TO_DAC2L */
+#define WM8996_DSP2RXL_TO_DAC2L_MASK 0x0002 /* DSP2RXL_TO_DAC2L */
+#define WM8996_DSP2RXL_TO_DAC2L_SHIFT 1 /* DSP2RXL_TO_DAC2L */
+#define WM8996_DSP2RXL_TO_DAC2L_WIDTH 1 /* DSP2RXL_TO_DAC2L */
+#define WM8996_DSP1RXL_TO_DAC2L 0x0001 /* DSP1RXL_TO_DAC2L */
+#define WM8996_DSP1RXL_TO_DAC2L_MASK 0x0001 /* DSP1RXL_TO_DAC2L */
+#define WM8996_DSP1RXL_TO_DAC2L_SHIFT 0 /* DSP1RXL_TO_DAC2L */
+#define WM8996_DSP1RXL_TO_DAC2L_WIDTH 1 /* DSP1RXL_TO_DAC2L */
+
+/*
+ * R1541 (0x605) - DAC2 Right Mixer Routing
+ */
+#define WM8996_ADCR_TO_DAC2R 0x0020 /* ADCR_TO_DAC2R */
+#define WM8996_ADCR_TO_DAC2R_MASK 0x0020 /* ADCR_TO_DAC2R */
+#define WM8996_ADCR_TO_DAC2R_SHIFT 5 /* ADCR_TO_DAC2R */
+#define WM8996_ADCR_TO_DAC2R_WIDTH 1 /* ADCR_TO_DAC2R */
+#define WM8996_ADCL_TO_DAC2R 0x0010 /* ADCL_TO_DAC2R */
+#define WM8996_ADCL_TO_DAC2R_MASK 0x0010 /* ADCL_TO_DAC2R */
+#define WM8996_ADCL_TO_DAC2R_SHIFT 4 /* ADCL_TO_DAC2R */
+#define WM8996_ADCL_TO_DAC2R_WIDTH 1 /* ADCL_TO_DAC2R */
+#define WM8996_DSP2RXR_TO_DAC2R 0x0002 /* DSP2RXR_TO_DAC2R */
+#define WM8996_DSP2RXR_TO_DAC2R_MASK 0x0002 /* DSP2RXR_TO_DAC2R */
+#define WM8996_DSP2RXR_TO_DAC2R_SHIFT 1 /* DSP2RXR_TO_DAC2R */
+#define WM8996_DSP2RXR_TO_DAC2R_WIDTH 1 /* DSP2RXR_TO_DAC2R */
+#define WM8996_DSP1RXR_TO_DAC2R 0x0001 /* DSP1RXR_TO_DAC2R */
+#define WM8996_DSP1RXR_TO_DAC2R_MASK 0x0001 /* DSP1RXR_TO_DAC2R */
+#define WM8996_DSP1RXR_TO_DAC2R_SHIFT 0 /* DSP1RXR_TO_DAC2R */
+#define WM8996_DSP1RXR_TO_DAC2R_WIDTH 1 /* DSP1RXR_TO_DAC2R */
+
+/*
+ * R1542 (0x606) - DSP1 TX Left Mixer Routing
+ */
+#define WM8996_ADC1L_TO_DSP1TXL 0x0002 /* ADC1L_TO_DSP1TXL */
+#define WM8996_ADC1L_TO_DSP1TXL_MASK 0x0002 /* ADC1L_TO_DSP1TXL */
+#define WM8996_ADC1L_TO_DSP1TXL_SHIFT 1 /* ADC1L_TO_DSP1TXL */
+#define WM8996_ADC1L_TO_DSP1TXL_WIDTH 1 /* ADC1L_TO_DSP1TXL */
+#define WM8996_DACL_TO_DSP1TXL 0x0001 /* DACL_TO_DSP1TXL */
+#define WM8996_DACL_TO_DSP1TXL_MASK 0x0001 /* DACL_TO_DSP1TXL */
+#define WM8996_DACL_TO_DSP1TXL_SHIFT 0 /* DACL_TO_DSP1TXL */
+#define WM8996_DACL_TO_DSP1TXL_WIDTH 1 /* DACL_TO_DSP1TXL */
+
+/*
+ * R1543 (0x607) - DSP1 TX Right Mixer Routing
+ */
+#define WM8996_ADC1R_TO_DSP1TXR 0x0002 /* ADC1R_TO_DSP1TXR */
+#define WM8996_ADC1R_TO_DSP1TXR_MASK 0x0002 /* ADC1R_TO_DSP1TXR */
+#define WM8996_ADC1R_TO_DSP1TXR_SHIFT 1 /* ADC1R_TO_DSP1TXR */
ADC1R_TO_DSP1TXR */ +#define WM8996_ADC1R_TO_DSP1TXR_WIDTH 1 /* ADC1R_TO_DSP1TXR */ +#define WM8996_DACR_TO_DSP1TXR 0x0001 /* DACR_TO_DSP1TXR */ +#define WM8996_DACR_TO_DSP1TXR_MASK 0x0001 /* DACR_TO_DSP1TXR */ +#define WM8996_DACR_TO_DSP1TXR_SHIFT 0 /* DACR_TO_DSP1TXR */ +#define WM8996_DACR_TO_DSP1TXR_WIDTH 1 /* DACR_TO_DSP1TXR */ + +/* + * R1544 (0x608) - DSP2 TX Left Mixer Routing + */ +#define WM8996_ADC2L_TO_DSP2TXL 0x0002 /* ADC2L_TO_DSP2TXL */ +#define WM8996_ADC2L_TO_DSP2TXL_MASK 0x0002 /* ADC2L_TO_DSP2TXL */ +#define WM8996_ADC2L_TO_DSP2TXL_SHIFT 1 /* ADC2L_TO_DSP2TXL */ +#define WM8996_ADC2L_TO_DSP2TXL_WIDTH 1 /* ADC2L_TO_DSP2TXL */ +#define WM8996_DACL_TO_DSP2TXL 0x0001 /* DACL_TO_DSP2TXL */ +#define WM8996_DACL_TO_DSP2TXL_MASK 0x0001 /* DACL_TO_DSP2TXL */ +#define WM8996_DACL_TO_DSP2TXL_SHIFT 0 /* DACL_TO_DSP2TXL */ +#define WM8996_DACL_TO_DSP2TXL_WIDTH 1 /* DACL_TO_DSP2TXL */ + +/* + * R1545 (0x609) - DSP2 TX Right Mixer Routing + */ +#define WM8996_ADC2R_TO_DSP2TXR 0x0002 /* ADC2R_TO_DSP2TXR */ +#define WM8996_ADC2R_TO_DSP2TXR_MASK 0x0002 /* ADC2R_TO_DSP2TXR */ +#define WM8996_ADC2R_TO_DSP2TXR_SHIFT 1 /* ADC2R_TO_DSP2TXR */ +#define WM8996_ADC2R_TO_DSP2TXR_WIDTH 1 /* ADC2R_TO_DSP2TXR */ +#define WM8996_DACR_TO_DSP2TXR 0x0001 /* DACR_TO_DSP2TXR */ +#define WM8996_DACR_TO_DSP2TXR_MASK 0x0001 /* DACR_TO_DSP2TXR */ +#define WM8996_DACR_TO_DSP2TXR_SHIFT 0 /* DACR_TO_DSP2TXR */ +#define WM8996_DACR_TO_DSP2TXR_WIDTH 1 /* DACR_TO_DSP2TXR */ + +/* + * R1546 (0x60A) - DSP TX Mixer Select + */ +#define WM8996_DAC_TO_DSPTX_SRC 0x0001 /* DAC_TO_DSPTX_SRC */ +#define WM8996_DAC_TO_DSPTX_SRC_MASK 0x0001 /* DAC_TO_DSPTX_SRC */ +#define WM8996_DAC_TO_DSPTX_SRC_SHIFT 0 /* DAC_TO_DSPTX_SRC */ +#define WM8996_DAC_TO_DSPTX_SRC_WIDTH 1 /* DAC_TO_DSPTX_SRC */ + +/* + * R1552 (0x610) - DAC Softmute + */ +#define WM8996_DAC_SOFTMUTEMODE 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8996_DAC_SOFTMUTEMODE_MASK 0x0002 /* DAC_SOFTMUTEMODE */ +#define WM8996_DAC_SOFTMUTEMODE_SHIFT 1 /* DAC_SOFTMUTEMODE */ +#define WM8996_DAC_SOFTMUTEMODE_WIDTH 1 /* DAC_SOFTMUTEMODE */ +#define WM8996_DAC_MUTERATE 0x0001 /* DAC_MUTERATE */ +#define WM8996_DAC_MUTERATE_MASK 0x0001 /* DAC_MUTERATE */ +#define WM8996_DAC_MUTERATE_SHIFT 0 /* DAC_MUTERATE */ +#define WM8996_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */ + +/* + * R1568 (0x620) - Oversampling + */ +#define WM8996_SPK_OSR128 0x0008 /* SPK_OSR128 */ +#define WM8996_SPK_OSR128_MASK 0x0008 /* SPK_OSR128 */ +#define WM8996_SPK_OSR128_SHIFT 3 /* SPK_OSR128 */ +#define WM8996_SPK_OSR128_WIDTH 1 /* SPK_OSR128 */ +#define WM8996_DMIC_OSR64 0x0004 /* DMIC_OSR64 */ +#define WM8996_DMIC_OSR64_MASK 0x0004 /* DMIC_OSR64 */ +#define WM8996_DMIC_OSR64_SHIFT 2 /* DMIC_OSR64 */ +#define WM8996_DMIC_OSR64_WIDTH 1 /* DMIC_OSR64 */ +#define WM8996_ADC_OSR128 0x0002 /* ADC_OSR128 */ +#define WM8996_ADC_OSR128_MASK 0x0002 /* ADC_OSR128 */ +#define WM8996_ADC_OSR128_SHIFT 1 /* ADC_OSR128 */ +#define WM8996_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */ +#define WM8996_DAC_OSR128 0x0001 /* DAC_OSR128 */ +#define WM8996_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */ +#define WM8996_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */ +#define WM8996_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */ + +/* + * R1569 (0x621) - Sidetone + */ +#define WM8996_ST_LPF 0x1000 /* ST_LPF */ +#define WM8996_ST_LPF_MASK 0x1000 /* ST_LPF */ +#define WM8996_ST_LPF_SHIFT 12 /* ST_LPF */ +#define WM8996_ST_LPF_WIDTH 1 /* ST_LPF */ +#define WM8996_ST_HPF_CUT_MASK 0x0380 /* ST_HPF_CUT - [9:7] */ +#define WM8996_ST_HPF_CUT_SHIFT 7 /* ST_HPF_CUT - [9:7] */ +#define 
WM8996_ST_HPF_CUT_WIDTH 3 /* ST_HPF_CUT - [9:7] */ +#define WM8996_ST_HPF 0x0040 /* ST_HPF */ +#define WM8996_ST_HPF_MASK 0x0040 /* ST_HPF */ +#define WM8996_ST_HPF_SHIFT 6 /* ST_HPF */ +#define WM8996_ST_HPF_WIDTH 1 /* ST_HPF */ +#define WM8996_STR_SEL 0x0002 /* STR_SEL */ +#define WM8996_STR_SEL_MASK 0x0002 /* STR_SEL */ +#define WM8996_STR_SEL_SHIFT 1 /* STR_SEL */ +#define WM8996_STR_SEL_WIDTH 1 /* STR_SEL */ +#define WM8996_STL_SEL 0x0001 /* STL_SEL */ +#define WM8996_STL_SEL_MASK 0x0001 /* STL_SEL */ +#define WM8996_STL_SEL_SHIFT 0 /* STL_SEL */ +#define WM8996_STL_SEL_WIDTH 1 /* STL_SEL */ + +/* + * R1792 (0x700) - GPIO 1 + */ +#define WM8996_GP1_DIR 0x8000 /* GP1_DIR */ +#define WM8996_GP1_DIR_MASK 0x8000 /* GP1_DIR */ +#define WM8996_GP1_DIR_SHIFT 15 /* GP1_DIR */ +#define WM8996_GP1_DIR_WIDTH 1 /* GP1_DIR */ +#define WM8996_GP1_PU 0x4000 /* GP1_PU */ +#define WM8996_GP1_PU_MASK 0x4000 /* GP1_PU */ +#define WM8996_GP1_PU_SHIFT 14 /* GP1_PU */ +#define WM8996_GP1_PU_WIDTH 1 /* GP1_PU */ +#define WM8996_GP1_PD 0x2000 /* GP1_PD */ +#define WM8996_GP1_PD_MASK 0x2000 /* GP1_PD */ +#define WM8996_GP1_PD_SHIFT 13 /* GP1_PD */ +#define WM8996_GP1_PD_WIDTH 1 /* GP1_PD */ +#define WM8996_GP1_POL 0x0400 /* GP1_POL */ +#define WM8996_GP1_POL_MASK 0x0400 /* GP1_POL */ +#define WM8996_GP1_POL_SHIFT 10 /* GP1_POL */ +#define WM8996_GP1_POL_WIDTH 1 /* GP1_POL */ +#define WM8996_GP1_OP_CFG 0x0200 /* GP1_OP_CFG */ +#define WM8996_GP1_OP_CFG_MASK 0x0200 /* GP1_OP_CFG */ +#define WM8996_GP1_OP_CFG_SHIFT 9 /* GP1_OP_CFG */ +#define WM8996_GP1_OP_CFG_WIDTH 1 /* GP1_OP_CFG */ +#define WM8996_GP1_DB 0x0100 /* GP1_DB */ +#define WM8996_GP1_DB_MASK 0x0100 /* GP1_DB */ +#define WM8996_GP1_DB_SHIFT 8 /* GP1_DB */ +#define WM8996_GP1_DB_WIDTH 1 /* GP1_DB */ +#define WM8996_GP1_LVL 0x0040 /* GP1_LVL */ +#define WM8996_GP1_LVL_MASK 0x0040 /* GP1_LVL */ +#define WM8996_GP1_LVL_SHIFT 6 /* GP1_LVL */ +#define WM8996_GP1_LVL_WIDTH 1 /* GP1_LVL */ +#define WM8996_GP1_FN_MASK 0x000F /* GP1_FN - [3:0] */ +#define WM8996_GP1_FN_SHIFT 0 /* GP1_FN - [3:0] */ +#define WM8996_GP1_FN_WIDTH 4 /* GP1_FN - [3:0] */ + +/* + * R1793 (0x701) - GPIO 2 + */ +#define WM8996_GP2_DIR 0x8000 /* GP2_DIR */ +#define WM8996_GP2_DIR_MASK 0x8000 /* GP2_DIR */ +#define WM8996_GP2_DIR_SHIFT 15 /* GP2_DIR */ +#define WM8996_GP2_DIR_WIDTH 1 /* GP2_DIR */ +#define WM8996_GP2_PU 0x4000 /* GP2_PU */ +#define WM8996_GP2_PU_MASK 0x4000 /* GP2_PU */ +#define WM8996_GP2_PU_SHIFT 14 /* GP2_PU */ +#define WM8996_GP2_PU_WIDTH 1 /* GP2_PU */ +#define WM8996_GP2_PD 0x2000 /* GP2_PD */ +#define WM8996_GP2_PD_MASK 0x2000 /* GP2_PD */ +#define WM8996_GP2_PD_SHIFT 13 /* GP2_PD */ +#define WM8996_GP2_PD_WIDTH 1 /* GP2_PD */ +#define WM8996_GP2_POL 0x0400 /* GP2_POL */ +#define WM8996_GP2_POL_MASK 0x0400 /* GP2_POL */ +#define WM8996_GP2_POL_SHIFT 10 /* GP2_POL */ +#define WM8996_GP2_POL_WIDTH 1 /* GP2_POL */ +#define WM8996_GP2_OP_CFG 0x0200 /* GP2_OP_CFG */ +#define WM8996_GP2_OP_CFG_MASK 0x0200 /* GP2_OP_CFG */ +#define WM8996_GP2_OP_CFG_SHIFT 9 /* GP2_OP_CFG */ +#define WM8996_GP2_OP_CFG_WIDTH 1 /* GP2_OP_CFG */ +#define WM8996_GP2_DB 0x0100 /* GP2_DB */ +#define WM8996_GP2_DB_MASK 0x0100 /* GP2_DB */ +#define WM8996_GP2_DB_SHIFT 8 /* GP2_DB */ +#define WM8996_GP2_DB_WIDTH 1 /* GP2_DB */ +#define WM8996_GP2_LVL 0x0040 /* GP2_LVL */ +#define WM8996_GP2_LVL_MASK 0x0040 /* GP2_LVL */ +#define WM8996_GP2_LVL_SHIFT 6 /* GP2_LVL */ +#define WM8996_GP2_LVL_WIDTH 1 /* GP2_LVL */ +#define WM8996_GP2_FN_MASK 0x000F /* GP2_FN - [3:0] */ +#define WM8996_GP2_FN_SHIFT 0 
/* GP2_FN - [3:0] */ +#define WM8996_GP2_FN_WIDTH 4 /* GP2_FN - [3:0] */ + +/* + * R1794 (0x702) - GPIO 3 + */ +#define WM8996_GP3_DIR 0x8000 /* GP3_DIR */ +#define WM8996_GP3_DIR_MASK 0x8000 /* GP3_DIR */ +#define WM8996_GP3_DIR_SHIFT 15 /* GP3_DIR */ +#define WM8996_GP3_DIR_WIDTH 1 /* GP3_DIR */ +#define WM8996_GP3_PU 0x4000 /* GP3_PU */ +#define WM8996_GP3_PU_MASK 0x4000 /* GP3_PU */ +#define WM8996_GP3_PU_SHIFT 14 /* GP3_PU */ +#define WM8996_GP3_PU_WIDTH 1 /* GP3_PU */ +#define WM8996_GP3_PD 0x2000 /* GP3_PD */ +#define WM8996_GP3_PD_MASK 0x2000 /* GP3_PD */ +#define WM8996_GP3_PD_SHIFT 13 /* GP3_PD */ +#define WM8996_GP3_PD_WIDTH 1 /* GP3_PD */ +#define WM8996_GP3_POL 0x0400 /* GP3_POL */ +#define WM8996_GP3_POL_MASK 0x0400 /* GP3_POL */ +#define WM8996_GP3_POL_SHIFT 10 /* GP3_POL */ +#define WM8996_GP3_POL_WIDTH 1 /* GP3_POL */ +#define WM8996_GP3_OP_CFG 0x0200 /* GP3_OP_CFG */ +#define WM8996_GP3_OP_CFG_MASK 0x0200 /* GP3_OP_CFG */ +#define WM8996_GP3_OP_CFG_SHIFT 9 /* GP3_OP_CFG */ +#define WM8996_GP3_OP_CFG_WIDTH 1 /* GP3_OP_CFG */ +#define WM8996_GP3_DB 0x0100 /* GP3_DB */ +#define WM8996_GP3_DB_MASK 0x0100 /* GP3_DB */ +#define WM8996_GP3_DB_SHIFT 8 /* GP3_DB */ +#define WM8996_GP3_DB_WIDTH 1 /* GP3_DB */ +#define WM8996_GP3_LVL 0x0040 /* GP3_LVL */ +#define WM8996_GP3_LVL_MASK 0x0040 /* GP3_LVL */ +#define WM8996_GP3_LVL_SHIFT 6 /* GP3_LVL */ +#define WM8996_GP3_LVL_WIDTH 1 /* GP3_LVL */ +#define WM8996_GP3_FN_MASK 0x000F /* GP3_FN - [3:0] */ +#define WM8996_GP3_FN_SHIFT 0 /* GP3_FN - [3:0] */ +#define WM8996_GP3_FN_WIDTH 4 /* GP3_FN - [3:0] */ + +/* + * R1795 (0x703) - GPIO 4 + */ +#define WM8996_GP4_DIR 0x8000 /* GP4_DIR */ +#define WM8996_GP4_DIR_MASK 0x8000 /* GP4_DIR */ +#define WM8996_GP4_DIR_SHIFT 15 /* GP4_DIR */ +#define WM8996_GP4_DIR_WIDTH 1 /* GP4_DIR */ +#define WM8996_GP4_PU 0x4000 /* GP4_PU */ +#define WM8996_GP4_PU_MASK 0x4000 /* GP4_PU */ +#define WM8996_GP4_PU_SHIFT 14 /* GP4_PU */ +#define WM8996_GP4_PU_WIDTH 1 /* GP4_PU */ +#define WM8996_GP4_PD 0x2000 /* GP4_PD */ +#define WM8996_GP4_PD_MASK 0x2000 /* GP4_PD */ +#define WM8996_GP4_PD_SHIFT 13 /* GP4_PD */ +#define WM8996_GP4_PD_WIDTH 1 /* GP4_PD */ +#define WM8996_GP4_POL 0x0400 /* GP4_POL */ +#define WM8996_GP4_POL_MASK 0x0400 /* GP4_POL */ +#define WM8996_GP4_POL_SHIFT 10 /* GP4_POL */ +#define WM8996_GP4_POL_WIDTH 1 /* GP4_POL */ +#define WM8996_GP4_OP_CFG 0x0200 /* GP4_OP_CFG */ +#define WM8996_GP4_OP_CFG_MASK 0x0200 /* GP4_OP_CFG */ +#define WM8996_GP4_OP_CFG_SHIFT 9 /* GP4_OP_CFG */ +#define WM8996_GP4_OP_CFG_WIDTH 1 /* GP4_OP_CFG */ +#define WM8996_GP4_DB 0x0100 /* GP4_DB */ +#define WM8996_GP4_DB_MASK 0x0100 /* GP4_DB */ +#define WM8996_GP4_DB_SHIFT 8 /* GP4_DB */ +#define WM8996_GP4_DB_WIDTH 1 /* GP4_DB */ +#define WM8996_GP4_LVL 0x0040 /* GP4_LVL */ +#define WM8996_GP4_LVL_MASK 0x0040 /* GP4_LVL */ +#define WM8996_GP4_LVL_SHIFT 6 /* GP4_LVL */ +#define WM8996_GP4_LVL_WIDTH 1 /* GP4_LVL */ +#define WM8996_GP4_FN_MASK 0x000F /* GP4_FN - [3:0] */ +#define WM8996_GP4_FN_SHIFT 0 /* GP4_FN - [3:0] */ +#define WM8996_GP4_FN_WIDTH 4 /* GP4_FN - [3:0] */ + +/* + * R1796 (0x704) - GPIO 5 + */ +#define WM8996_GP5_DIR 0x8000 /* GP5_DIR */ +#define WM8996_GP5_DIR_MASK 0x8000 /* GP5_DIR */ +#define WM8996_GP5_DIR_SHIFT 15 /* GP5_DIR */ +#define WM8996_GP5_DIR_WIDTH 1 /* GP5_DIR */ +#define WM8996_GP5_PU 0x4000 /* GP5_PU */ +#define WM8996_GP5_PU_MASK 0x4000 /* GP5_PU */ +#define WM8996_GP5_PU_SHIFT 14 /* GP5_PU */ +#define WM8996_GP5_PU_WIDTH 1 /* GP5_PU */ +#define WM8996_GP5_PD 0x2000 /* GP5_PD */ +#define 
WM8996_GP5_PD_MASK 0x2000 /* GP5_PD */ +#define WM8996_GP5_PD_SHIFT 13 /* GP5_PD */ +#define WM8996_GP5_PD_WIDTH 1 /* GP5_PD */ +#define WM8996_GP5_POL 0x0400 /* GP5_POL */ +#define WM8996_GP5_POL_MASK 0x0400 /* GP5_POL */ +#define WM8996_GP5_POL_SHIFT 10 /* GP5_POL */ +#define WM8996_GP5_POL_WIDTH 1 /* GP5_POL */ +#define WM8996_GP5_OP_CFG 0x0200 /* GP5_OP_CFG */ +#define WM8996_GP5_OP_CFG_MASK 0x0200 /* GP5_OP_CFG */ +#define WM8996_GP5_OP_CFG_SHIFT 9 /* GP5_OP_CFG */ +#define WM8996_GP5_OP_CFG_WIDTH 1 /* GP5_OP_CFG */ +#define WM8996_GP5_DB 0x0100 /* GP5_DB */ +#define WM8996_GP5_DB_MASK 0x0100 /* GP5_DB */ +#define WM8996_GP5_DB_SHIFT 8 /* GP5_DB */ +#define WM8996_GP5_DB_WIDTH 1 /* GP5_DB */ +#define WM8996_GP5_LVL 0x0040 /* GP5_LVL */ +#define WM8996_GP5_LVL_MASK 0x0040 /* GP5_LVL */ +#define WM8996_GP5_LVL_SHIFT 6 /* GP5_LVL */ +#define WM8996_GP5_LVL_WIDTH 1 /* GP5_LVL */ +#define WM8996_GP5_FN_MASK 0x000F /* GP5_FN - [3:0] */ +#define WM8996_GP5_FN_SHIFT 0 /* GP5_FN - [3:0] */ +#define WM8996_GP5_FN_WIDTH 4 /* GP5_FN - [3:0] */ + +/* + * R1824 (0x720) - Pull Control (1) + */ +#define WM8996_DMICDAT2_PD 0x1000 /* DMICDAT2_PD */ +#define WM8996_DMICDAT2_PD_MASK 0x1000 /* DMICDAT2_PD */ +#define WM8996_DMICDAT2_PD_SHIFT 12 /* DMICDAT2_PD */ +#define WM8996_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */ +#define WM8996_DMICDAT1_PD 0x0400 /* DMICDAT1_PD */ +#define WM8996_DMICDAT1_PD_MASK 0x0400 /* DMICDAT1_PD */ +#define WM8996_DMICDAT1_PD_SHIFT 10 /* DMICDAT1_PD */ +#define WM8996_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */ +#define WM8996_MCLK2_PU 0x0200 /* MCLK2_PU */ +#define WM8996_MCLK2_PU_MASK 0x0200 /* MCLK2_PU */ +#define WM8996_MCLK2_PU_SHIFT 9 /* MCLK2_PU */ +#define WM8996_MCLK2_PU_WIDTH 1 /* MCLK2_PU */ +#define WM8996_MCLK2_PD 0x0100 /* MCLK2_PD */ +#define WM8996_MCLK2_PD_MASK 0x0100 /* MCLK2_PD */ +#define WM8996_MCLK2_PD_SHIFT 8 /* MCLK2_PD */ +#define WM8996_MCLK2_PD_WIDTH 1 /* MCLK2_PD */ +#define WM8996_MCLK1_PU 0x0080 /* MCLK1_PU */ +#define WM8996_MCLK1_PU_MASK 0x0080 /* MCLK1_PU */ +#define WM8996_MCLK1_PU_SHIFT 7 /* MCLK1_PU */ +#define WM8996_MCLK1_PU_WIDTH 1 /* MCLK1_PU */ +#define WM8996_MCLK1_PD 0x0040 /* MCLK1_PD */ +#define WM8996_MCLK1_PD_MASK 0x0040 /* MCLK1_PD */ +#define WM8996_MCLK1_PD_SHIFT 6 /* MCLK1_PD */ +#define WM8996_MCLK1_PD_WIDTH 1 /* MCLK1_PD */ +#define WM8996_DACDAT1_PU 0x0020 /* DACDAT1_PU */ +#define WM8996_DACDAT1_PU_MASK 0x0020 /* DACDAT1_PU */ +#define WM8996_DACDAT1_PU_SHIFT 5 /* DACDAT1_PU */ +#define WM8996_DACDAT1_PU_WIDTH 1 /* DACDAT1_PU */ +#define WM8996_DACDAT1_PD 0x0010 /* DACDAT1_PD */ +#define WM8996_DACDAT1_PD_MASK 0x0010 /* DACDAT1_PD */ +#define WM8996_DACDAT1_PD_SHIFT 4 /* DACDAT1_PD */ +#define WM8996_DACDAT1_PD_WIDTH 1 /* DACDAT1_PD */ +#define WM8996_DACLRCLK1_PU 0x0008 /* DACLRCLK1_PU */ +#define WM8996_DACLRCLK1_PU_MASK 0x0008 /* DACLRCLK1_PU */ +#define WM8996_DACLRCLK1_PU_SHIFT 3 /* DACLRCLK1_PU */ +#define WM8996_DACLRCLK1_PU_WIDTH 1 /* DACLRCLK1_PU */ +#define WM8996_DACLRCLK1_PD 0x0004 /* DACLRCLK1_PD */ +#define WM8996_DACLRCLK1_PD_MASK 0x0004 /* DACLRCLK1_PD */ +#define WM8996_DACLRCLK1_PD_SHIFT 2 /* DACLRCLK1_PD */ +#define WM8996_DACLRCLK1_PD_WIDTH 1 /* DACLRCLK1_PD */ +#define WM8996_BCLK1_PU 0x0002 /* BCLK1_PU */ +#define WM8996_BCLK1_PU_MASK 0x0002 /* BCLK1_PU */ +#define WM8996_BCLK1_PU_SHIFT 1 /* BCLK1_PU */ +#define WM8996_BCLK1_PU_WIDTH 1 /* BCLK1_PU */ +#define WM8996_BCLK1_PD 0x0001 /* BCLK1_PD */ +#define WM8996_BCLK1_PD_MASK 0x0001 /* BCLK1_PD */ +#define WM8996_BCLK1_PD_SHIFT 0 /* BCLK1_PD */ +#define 
WM8996_BCLK1_PD_WIDTH 1 /* BCLK1_PD */ + +/* + * R1825 (0x721) - Pull Control (2) + */ +#define WM8996_LDO1ENA_PD 0x0100 /* LDO1ENA_PD */ +#define WM8996_LDO1ENA_PD_MASK 0x0100 /* LDO1ENA_PD */ +#define WM8996_LDO1ENA_PD_SHIFT 8 /* LDO1ENA_PD */ +#define WM8996_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */ +#define WM8996_ADDR_PD 0x0040 /* ADDR_PD */ +#define WM8996_ADDR_PD_MASK 0x0040 /* ADDR_PD */ +#define WM8996_ADDR_PD_SHIFT 6 /* ADDR_PD */ +#define WM8996_ADDR_PD_WIDTH 1 /* ADDR_PD */ +#define WM8996_DACDAT2_PU 0x0020 /* DACDAT2_PU */ +#define WM8996_DACDAT2_PU_MASK 0x0020 /* DACDAT2_PU */ +#define WM8996_DACDAT2_PU_SHIFT 5 /* DACDAT2_PU */ +#define WM8996_DACDAT2_PU_WIDTH 1 /* DACDAT2_PU */ +#define WM8996_DACDAT2_PD 0x0010 /* DACDAT2_PD */ +#define WM8996_DACDAT2_PD_MASK 0x0010 /* DACDAT2_PD */ +#define WM8996_DACDAT2_PD_SHIFT 4 /* DACDAT2_PD */ +#define WM8996_DACDAT2_PD_WIDTH 1 /* DACDAT2_PD */ +#define WM8996_DACLRCLK2_PU 0x0008 /* DACLRCLK2_PU */ +#define WM8996_DACLRCLK2_PU_MASK 0x0008 /* DACLRCLK2_PU */ +#define WM8996_DACLRCLK2_PU_SHIFT 3 /* DACLRCLK2_PU */ +#define WM8996_DACLRCLK2_PU_WIDTH 1 /* DACLRCLK2_PU */ +#define WM8996_DACLRCLK2_PD 0x0004 /* DACLRCLK2_PD */ +#define WM8996_DACLRCLK2_PD_MASK 0x0004 /* DACLRCLK2_PD */ +#define WM8996_DACLRCLK2_PD_SHIFT 2 /* DACLRCLK2_PD */ +#define WM8996_DACLRCLK2_PD_WIDTH 1 /* DACLRCLK2_PD */ +#define WM8996_BCLK2_PU 0x0002 /* BCLK2_PU */ +#define WM8996_BCLK2_PU_MASK 0x0002 /* BCLK2_PU */ +#define WM8996_BCLK2_PU_SHIFT 1 /* BCLK2_PU */ +#define WM8996_BCLK2_PU_WIDTH 1 /* BCLK2_PU */ +#define WM8996_BCLK2_PD 0x0001 /* BCLK2_PD */ +#define WM8996_BCLK2_PD_MASK 0x0001 /* BCLK2_PD */ +#define WM8996_BCLK2_PD_SHIFT 0 /* BCLK2_PD */ +#define WM8996_BCLK2_PD_WIDTH 1 /* BCLK2_PD */ + +/* + * R1840 (0x730) - Interrupt Status 1 + */ +#define WM8996_GP5_EINT 0x0010 /* GP5_EINT */ +#define WM8996_GP5_EINT_MASK 0x0010 /* GP5_EINT */ +#define WM8996_GP5_EINT_SHIFT 4 /* GP5_EINT */ +#define WM8996_GP5_EINT_WIDTH 1 /* GP5_EINT */ +#define WM8996_GP4_EINT 0x0008 /* GP4_EINT */ +#define WM8996_GP4_EINT_MASK 0x0008 /* GP4_EINT */ +#define WM8996_GP4_EINT_SHIFT 3 /* GP4_EINT */ +#define WM8996_GP4_EINT_WIDTH 1 /* GP4_EINT */ +#define WM8996_GP3_EINT 0x0004 /* GP3_EINT */ +#define WM8996_GP3_EINT_MASK 0x0004 /* GP3_EINT */ +#define WM8996_GP3_EINT_SHIFT 2 /* GP3_EINT */ +#define WM8996_GP3_EINT_WIDTH 1 /* GP3_EINT */ +#define WM8996_GP2_EINT 0x0002 /* GP2_EINT */ +#define WM8996_GP2_EINT_MASK 0x0002 /* GP2_EINT */ +#define WM8996_GP2_EINT_SHIFT 1 /* GP2_EINT */ +#define WM8996_GP2_EINT_WIDTH 1 /* GP2_EINT */ +#define WM8996_GP1_EINT 0x0001 /* GP1_EINT */ +#define WM8996_GP1_EINT_MASK 0x0001 /* GP1_EINT */ +#define WM8996_GP1_EINT_SHIFT 0 /* GP1_EINT */ +#define WM8996_GP1_EINT_WIDTH 1 /* GP1_EINT */ + +/* + * R1841 (0x731) - Interrupt Status 2 + */ +#define WM8996_DCS_DONE_23_EINT 0x1000 /* DCS_DONE_23_EINT */ +#define WM8996_DCS_DONE_23_EINT_MASK 0x1000 /* DCS_DONE_23_EINT */ +#define WM8996_DCS_DONE_23_EINT_SHIFT 12 /* DCS_DONE_23_EINT */ +#define WM8996_DCS_DONE_23_EINT_WIDTH 1 /* DCS_DONE_23_EINT */ +#define WM8996_DCS_DONE_01_EINT 0x0800 /* DCS_DONE_01_EINT */ +#define WM8996_DCS_DONE_01_EINT_MASK 0x0800 /* DCS_DONE_01_EINT */ +#define WM8996_DCS_DONE_01_EINT_SHIFT 11 /* DCS_DONE_01_EINT */ +#define WM8996_DCS_DONE_01_EINT_WIDTH 1 /* DCS_DONE_01_EINT */ +#define WM8996_WSEQ_DONE_EINT 0x0400 /* WSEQ_DONE_EINT */ +#define WM8996_WSEQ_DONE_EINT_MASK 0x0400 /* WSEQ_DONE_EINT */ +#define WM8996_WSEQ_DONE_EINT_SHIFT 10 /* WSEQ_DONE_EINT */ +#define 
WM8996_WSEQ_DONE_EINT_WIDTH 1 /* WSEQ_DONE_EINT */ +#define WM8996_FIFOS_ERR_EINT 0x0200 /* FIFOS_ERR_EINT */ +#define WM8996_FIFOS_ERR_EINT_MASK 0x0200 /* FIFOS_ERR_EINT */ +#define WM8996_FIFOS_ERR_EINT_SHIFT 9 /* FIFOS_ERR_EINT */ +#define WM8996_FIFOS_ERR_EINT_WIDTH 1 /* FIFOS_ERR_EINT */ +#define WM8996_DSP2DRC_SIG_DET_EINT 0x0080 /* DSP2DRC_SIG_DET_EINT */ +#define WM8996_DSP2DRC_SIG_DET_EINT_MASK 0x0080 /* DSP2DRC_SIG_DET_EINT */ +#define WM8996_DSP2DRC_SIG_DET_EINT_SHIFT 7 /* DSP2DRC_SIG_DET_EINT */ +#define WM8996_DSP2DRC_SIG_DET_EINT_WIDTH 1 /* DSP2DRC_SIG_DET_EINT */ +#define WM8996_DSP1DRC_SIG_DET_EINT 0x0040 /* DSP1DRC_SIG_DET_EINT */ +#define WM8996_DSP1DRC_SIG_DET_EINT_MASK 0x0040 /* DSP1DRC_SIG_DET_EINT */ +#define WM8996_DSP1DRC_SIG_DET_EINT_SHIFT 6 /* DSP1DRC_SIG_DET_EINT */ +#define WM8996_DSP1DRC_SIG_DET_EINT_WIDTH 1 /* DSP1DRC_SIG_DET_EINT */ +#define WM8996_FLL_SW_CLK_DONE_EINT 0x0008 /* FLL_SW_CLK_DONE_EINT */ +#define WM8996_FLL_SW_CLK_DONE_EINT_MASK 0x0008 /* FLL_SW_CLK_DONE_EINT */ +#define WM8996_FLL_SW_CLK_DONE_EINT_SHIFT 3 /* FLL_SW_CLK_DONE_EINT */ +#define WM8996_FLL_SW_CLK_DONE_EINT_WIDTH 1 /* FLL_SW_CLK_DONE_EINT */ +#define WM8996_FLL_LOCK_EINT 0x0004 /* FLL_LOCK_EINT */ +#define WM8996_FLL_LOCK_EINT_MASK 0x0004 /* FLL_LOCK_EINT */ +#define WM8996_FLL_LOCK_EINT_SHIFT 2 /* FLL_LOCK_EINT */ +#define WM8996_FLL_LOCK_EINT_WIDTH 1 /* FLL_LOCK_EINT */ +#define WM8996_HP_DONE_EINT 0x0002 /* HP_DONE_EINT */ +#define WM8996_HP_DONE_EINT_MASK 0x0002 /* HP_DONE_EINT */ +#define WM8996_HP_DONE_EINT_SHIFT 1 /* HP_DONE_EINT */ +#define WM8996_HP_DONE_EINT_WIDTH 1 /* HP_DONE_EINT */ +#define WM8996_MICD_EINT 0x0001 /* MICD_EINT */ +#define WM8996_MICD_EINT_MASK 0x0001 /* MICD_EINT */ +#define WM8996_MICD_EINT_SHIFT 0 /* MICD_EINT */ +#define WM8996_MICD_EINT_WIDTH 1 /* MICD_EINT */ + +/* + * R1842 (0x732) - Interrupt Raw Status 2 + */ +#define WM8996_DCS_DONE_23_STS 0x1000 /* DCS_DONE_23_STS */ +#define WM8996_DCS_DONE_23_STS_MASK 0x1000 /* DCS_DONE_23_STS */ +#define WM8996_DCS_DONE_23_STS_SHIFT 12 /* DCS_DONE_23_STS */ +#define WM8996_DCS_DONE_23_STS_WIDTH 1 /* DCS_DONE_23_STS */ +#define WM8996_DCS_DONE_01_STS 0x0800 /* DCS_DONE_01_STS */ +#define WM8996_DCS_DONE_01_STS_MASK 0x0800 /* DCS_DONE_01_STS */ +#define WM8996_DCS_DONE_01_STS_SHIFT 11 /* DCS_DONE_01_STS */ +#define WM8996_DCS_DONE_01_STS_WIDTH 1 /* DCS_DONE_01_STS */ +#define WM8996_WSEQ_DONE_STS 0x0400 /* WSEQ_DONE_STS */ +#define WM8996_WSEQ_DONE_STS_MASK 0x0400 /* WSEQ_DONE_STS */ +#define WM8996_WSEQ_DONE_STS_SHIFT 10 /* WSEQ_DONE_STS */ +#define WM8996_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */ +#define WM8996_FIFOS_ERR_STS 0x0200 /* FIFOS_ERR_STS */ +#define WM8996_FIFOS_ERR_STS_MASK 0x0200 /* FIFOS_ERR_STS */ +#define WM8996_FIFOS_ERR_STS_SHIFT 9 /* FIFOS_ERR_STS */ +#define WM8996_FIFOS_ERR_STS_WIDTH 1 /* FIFOS_ERR_STS */ +#define WM8996_DSP2DRC_SIG_DET_STS 0x0080 /* DSP2DRC_SIG_DET_STS */ +#define WM8996_DSP2DRC_SIG_DET_STS_MASK 0x0080 /* DSP2DRC_SIG_DET_STS */ +#define WM8996_DSP2DRC_SIG_DET_STS_SHIFT 7 /* DSP2DRC_SIG_DET_STS */ +#define WM8996_DSP2DRC_SIG_DET_STS_WIDTH 1 /* DSP2DRC_SIG_DET_STS */ +#define WM8996_DSP1DRC_SIG_DET_STS 0x0040 /* DSP1DRC_SIG_DET_STS */ +#define WM8996_DSP1DRC_SIG_DET_STS_MASK 0x0040 /* DSP1DRC_SIG_DET_STS */ +#define WM8996_DSP1DRC_SIG_DET_STS_SHIFT 6 /* DSP1DRC_SIG_DET_STS */ +#define WM8996_DSP1DRC_SIG_DET_STS_WIDTH 1 /* DSP1DRC_SIG_DET_STS */ +#define WM8996_FLL_LOCK_STS 0x0004 /* FLL_LOCK_STS */ +#define WM8996_FLL_LOCK_STS_MASK 0x0004 /* FLL_LOCK_STS */ +#define 
WM8996_FLL_LOCK_STS_SHIFT 2 /* FLL_LOCK_STS */ +#define WM8996_FLL_LOCK_STS_WIDTH 1 /* FLL_LOCK_STS */ + +/* + * R1848 (0x738) - Interrupt Status 1 Mask + */ +#define WM8996_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */ +#define WM8996_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */ +#define WM8996_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */ +#define WM8996_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */ +#define WM8996_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */ +#define WM8996_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */ +#define WM8996_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */ +#define WM8996_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */ +#define WM8996_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */ +#define WM8996_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */ +#define WM8996_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */ +#define WM8996_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */ +#define WM8996_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */ +#define WM8996_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */ +#define WM8996_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */ +#define WM8996_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */ +#define WM8996_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */ +#define WM8996_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */ +#define WM8996_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */ +#define WM8996_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */ + +/* + * R1849 (0x739) - Interrupt Status 2 Mask + */ +#define WM8996_IM_DCS_DONE_23_EINT 0x1000 /* IM_DCS_DONE_23_EINT */ +#define WM8996_IM_DCS_DONE_23_EINT_MASK 0x1000 /* IM_DCS_DONE_23_EINT */ +#define WM8996_IM_DCS_DONE_23_EINT_SHIFT 12 /* IM_DCS_DONE_23_EINT */ +#define WM8996_IM_DCS_DONE_23_EINT_WIDTH 1 /* IM_DCS_DONE_23_EINT */ +#define WM8996_IM_DCS_DONE_01_EINT 0x0800 /* IM_DCS_DONE_01_EINT */ +#define WM8996_IM_DCS_DONE_01_EINT_MASK 0x0800 /* IM_DCS_DONE_01_EINT */ +#define WM8996_IM_DCS_DONE_01_EINT_SHIFT 11 /* IM_DCS_DONE_01_EINT */ +#define WM8996_IM_DCS_DONE_01_EINT_WIDTH 1 /* IM_DCS_DONE_01_EINT */ +#define WM8996_IM_WSEQ_DONE_EINT 0x0400 /* IM_WSEQ_DONE_EINT */ +#define WM8996_IM_WSEQ_DONE_EINT_MASK 0x0400 /* IM_WSEQ_DONE_EINT */ +#define WM8996_IM_WSEQ_DONE_EINT_SHIFT 10 /* IM_WSEQ_DONE_EINT */ +#define WM8996_IM_WSEQ_DONE_EINT_WIDTH 1 /* IM_WSEQ_DONE_EINT */ +#define WM8996_IM_FIFOS_ERR_EINT 0x0200 /* IM_FIFOS_ERR_EINT */ +#define WM8996_IM_FIFOS_ERR_EINT_MASK 0x0200 /* IM_FIFOS_ERR_EINT */ +#define WM8996_IM_FIFOS_ERR_EINT_SHIFT 9 /* IM_FIFOS_ERR_EINT */ +#define WM8996_IM_FIFOS_ERR_EINT_WIDTH 1 /* IM_FIFOS_ERR_EINT */ +#define WM8996_IM_DSP2DRC_SIG_DET_EINT 0x0080 /* IM_DSP2DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP2DRC_SIG_DET_EINT_MASK 0x0080 /* IM_DSP2DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP2DRC_SIG_DET_EINT_SHIFT 7 /* IM_DSP2DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP2DRC_SIG_DET_EINT_WIDTH 1 /* IM_DSP2DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP1DRC_SIG_DET_EINT 0x0040 /* IM_DSP1DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP1DRC_SIG_DET_EINT_MASK 0x0040 /* IM_DSP1DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP1DRC_SIG_DET_EINT_SHIFT 6 /* IM_DSP1DRC_SIG_DET_EINT */ +#define WM8996_IM_DSP1DRC_SIG_DET_EINT_WIDTH 1 /* IM_DSP1DRC_SIG_DET_EINT */ +#define WM8996_IM_FLL_SW_CLK_DONE_EINT 0x0008 /* IM_FLL_SW_CLK_DONE_EINT */ +#define WM8996_IM_FLL_SW_CLK_DONE_EINT_MASK 0x0008 /* IM_FLL_SW_CLK_DONE_EINT */ +#define WM8996_IM_FLL_SW_CLK_DONE_EINT_SHIFT 3 /* IM_FLL_SW_CLK_DONE_EINT */ +#define WM8996_IM_FLL_SW_CLK_DONE_EINT_WIDTH 1 /* IM_FLL_SW_CLK_DONE_EINT */ +#define WM8996_IM_FLL_LOCK_EINT 0x0004 /* IM_FLL_LOCK_EINT */ +#define WM8996_IM_FLL_LOCK_EINT_MASK 0x0004 /* IM_FLL_LOCK_EINT */ +#define WM8996_IM_FLL_LOCK_EINT_SHIFT 2 /* IM_FLL_LOCK_EINT */ 
+#define WM8996_IM_FLL_LOCK_EINT_WIDTH 1 /* IM_FLL_LOCK_EINT */ +#define WM8996_IM_HP_DONE_EINT 0x0002 /* IM_HP_DONE_EINT */ +#define WM8996_IM_HP_DONE_EINT_MASK 0x0002 /* IM_HP_DONE_EINT */ +#define WM8996_IM_HP_DONE_EINT_SHIFT 1 /* IM_HP_DONE_EINT */ +#define WM8996_IM_HP_DONE_EINT_WIDTH 1 /* IM_HP_DONE_EINT */ +#define WM8996_IM_MICD_EINT 0x0001 /* IM_MICD_EINT */ +#define WM8996_IM_MICD_EINT_MASK 0x0001 /* IM_MICD_EINT */ +#define WM8996_IM_MICD_EINT_SHIFT 0 /* IM_MICD_EINT */ +#define WM8996_IM_MICD_EINT_WIDTH 1 /* IM_MICD_EINT */ + +/* + * R1856 (0x740) - Interrupt Control + */ +#define WM8996_IM_IRQ 0x0001 /* IM_IRQ */ +#define WM8996_IM_IRQ_MASK 0x0001 /* IM_IRQ */ +#define WM8996_IM_IRQ_SHIFT 0 /* IM_IRQ */ +#define WM8996_IM_IRQ_WIDTH 1 /* IM_IRQ */ + +/* + * R2048 (0x800) - Left PDM Speaker + */ +#define WM8996_SPKL_ENA 0x0010 /* SPKL_ENA */ +#define WM8996_SPKL_ENA_MASK 0x0010 /* SPKL_ENA */ +#define WM8996_SPKL_ENA_SHIFT 4 /* SPKL_ENA */ +#define WM8996_SPKL_ENA_WIDTH 1 /* SPKL_ENA */ +#define WM8996_SPKL_MUTE 0x0008 /* SPKL_MUTE */ +#define WM8996_SPKL_MUTE_MASK 0x0008 /* SPKL_MUTE */ +#define WM8996_SPKL_MUTE_SHIFT 3 /* SPKL_MUTE */ +#define WM8996_SPKL_MUTE_WIDTH 1 /* SPKL_MUTE */ +#define WM8996_SPKL_MUTE_ZC 0x0004 /* SPKL_MUTE_ZC */ +#define WM8996_SPKL_MUTE_ZC_MASK 0x0004 /* SPKL_MUTE_ZC */ +#define WM8996_SPKL_MUTE_ZC_SHIFT 2 /* SPKL_MUTE_ZC */ +#define WM8996_SPKL_MUTE_ZC_WIDTH 1 /* SPKL_MUTE_ZC */ +#define WM8996_SPKL_SRC_MASK 0x0003 /* SPKL_SRC - [1:0] */ +#define WM8996_SPKL_SRC_SHIFT 0 /* SPKL_SRC - [1:0] */ +#define WM8996_SPKL_SRC_WIDTH 2 /* SPKL_SRC - [1:0] */ + +/* + * R2049 (0x801) - Right PDM Speaker + */ +#define WM8996_SPKR_ENA 0x0010 /* SPKR_ENA */ +#define WM8996_SPKR_ENA_MASK 0x0010 /* SPKR_ENA */ +#define WM8996_SPKR_ENA_SHIFT 4 /* SPKR_ENA */ +#define WM8996_SPKR_ENA_WIDTH 1 /* SPKR_ENA */ +#define WM8996_SPKR_MUTE 0x0008 /* SPKR_MUTE */ +#define WM8996_SPKR_MUTE_MASK 0x0008 /* SPKR_MUTE */ +#define WM8996_SPKR_MUTE_SHIFT 3 /* SPKR_MUTE */ +#define WM8996_SPKR_MUTE_WIDTH 1 /* SPKR_MUTE */ +#define WM8996_SPKR_MUTE_ZC 0x0004 /* SPKR_MUTE_ZC */ +#define WM8996_SPKR_MUTE_ZC_MASK 0x0004 /* SPKR_MUTE_ZC */ +#define WM8996_SPKR_MUTE_ZC_SHIFT 2 /* SPKR_MUTE_ZC */ +#define WM8996_SPKR_MUTE_ZC_WIDTH 1 /* SPKR_MUTE_ZC */ +#define WM8996_SPKR_SRC_MASK 0x0003 /* SPKR_SRC - [1:0] */ +#define WM8996_SPKR_SRC_SHIFT 0 /* SPKR_SRC - [1:0] */ +#define WM8996_SPKR_SRC_WIDTH 2 /* SPKR_SRC - [1:0] */ + +/* + * R2050 (0x802) - PDM Speaker Mute Sequence + */ +#define WM8996_SPK_MUTE_ENDIAN 0x0100 /* SPK_MUTE_ENDIAN */ +#define WM8996_SPK_MUTE_ENDIAN_MASK 0x0100 /* SPK_MUTE_ENDIAN */ +#define WM8996_SPK_MUTE_ENDIAN_SHIFT 8 /* SPK_MUTE_ENDIAN */ +#define WM8996_SPK_MUTE_ENDIAN_WIDTH 1 /* SPK_MUTE_ENDIAN */ +#define WM8996_SPK_MUTE_SEQ1_MASK 0x00FF /* SPK_MUTE_SEQ1 - [7:0] */ +#define WM8996_SPK_MUTE_SEQ1_SHIFT 0 /* SPK_MUTE_SEQ1 - [7:0] */ +#define WM8996_SPK_MUTE_SEQ1_WIDTH 8 /* SPK_MUTE_SEQ1 - [7:0] */ + +/* + * R2051 (0x803) - PDM Speaker Volume + */ +#define WM8996_SPKR_VOL_MASK 0x00F0 /* SPKR_VOL - [7:4] */ +#define WM8996_SPKR_VOL_SHIFT 4 /* SPKR_VOL - [7:4] */ +#define WM8996_SPKR_VOL_WIDTH 4 /* SPKR_VOL - [7:4] */ +#define WM8996_SPKL_VOL_MASK 0x000F /* SPKL_VOL - [3:0] */ +#define WM8996_SPKL_VOL_SHIFT 0 /* SPKL_VOL - [3:0] */ +#define WM8996_SPKL_VOL_WIDTH 4 /* SPKL_VOL - [3:0] */ + +#endif diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index 54b0e4b..b99091f 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig @@ -183,7 +183,7 
@@ config SND_SOC_SPEYSIDE tristate "Audio support for Wolfson Speyside" depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 select SND_SAMSUNG_I2S - select SND_SOC_WM8915 + select SND_SOC_WM8996 select SND_SOC_WM9081 config SND_SOC_SPEYSIDE_WM8962 diff --git a/sound/soc/samsung/speyside.c b/sound/soc/samsung/speyside.c index d6dee4d..590e927 100644 --- a/sound/soc/samsung/speyside.c +++ b/sound/soc/samsung/speyside.c @@ -14,10 +14,10 @@ #include #include -#include "../codecs/wm8915.h" +#include "../codecs/wm8996.h" #include "../codecs/wm9081.h" -#define WM8915_HPSEL_GPIO 214 +#define WM8996_HPSEL_GPIO 214 static int speyside_set_bias_level(struct snd_soc_card *card, struct snd_soc_dapm_context *dapm, @@ -31,12 +31,12 @@ static int speyside_set_bias_level(struct snd_soc_card *card, switch (level) { case SND_SOC_BIAS_STANDBY: - ret = snd_soc_dai_set_sysclk(codec_dai, WM8915_SYSCLK_MCLK2, + ret = snd_soc_dai_set_sysclk(codec_dai, WM8996_SYSCLK_MCLK2, 32768, SND_SOC_CLOCK_IN); if (ret < 0) return ret; - ret = snd_soc_dai_set_pll(codec_dai, WM8915_FLL_MCLK2, + ret = snd_soc_dai_set_pll(codec_dai, WM8996_FLL_MCLK2, 0, 0, 0); if (ret < 0) { pr_err("Failed to stop FLL\n"); @@ -65,7 +65,7 @@ static int speyside_set_bias_level_post(struct snd_soc_card *card, case SND_SOC_BIAS_PREPARE: if (card->dapm.bias_level == SND_SOC_BIAS_STANDBY) { ret = snd_soc_dai_set_pll(codec_dai, 0, - WM8915_FLL_MCLK2, + WM8996_FLL_MCLK2, 32768, 48000 * 256); if (ret < 0) { pr_err("Failed to start FLL\n"); @@ -73,7 +73,7 @@ static int speyside_set_bias_level_post(struct snd_soc_card *card, } ret = snd_soc_dai_set_sysclk(codec_dai, - WM8915_SYSCLK_FLL, + WM8996_SYSCLK_FLL, 48000 * 256, SND_SOC_CLOCK_IN); if (ret < 0) @@ -149,26 +149,26 @@ static void speyside_set_polarity(struct snd_soc_codec *codec, int polarity) { speyside_jack_polarity = !polarity; - gpio_direction_output(WM8915_HPSEL_GPIO, speyside_jack_polarity); + gpio_direction_output(WM8996_HPSEL_GPIO, speyside_jack_polarity); /* Re-run DAPM to make sure we're using the correct mic bias */ snd_soc_dapm_sync(&codec->dapm); } -static int speyside_wm8915_init(struct snd_soc_pcm_runtime *rtd) +static int speyside_wm8996_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_dai *dai = rtd->codec_dai; struct snd_soc_codec *codec = rtd->codec; int ret; - ret = snd_soc_dai_set_sysclk(dai, WM8915_SYSCLK_MCLK2, 32768, 0); + ret = snd_soc_dai_set_sysclk(dai, WM8996_SYSCLK_MCLK2, 32768, 0); if (ret < 0) return ret; - ret = gpio_request(WM8915_HPSEL_GPIO, "HP_SEL"); + ret = gpio_request(WM8996_HPSEL_GPIO, "HP_SEL"); if (ret != 0) pr_err("Failed to request HP_SEL GPIO: %d\n", ret); - gpio_direction_output(WM8915_HPSEL_GPIO, speyside_jack_polarity); + gpio_direction_output(WM8996_HPSEL_GPIO, speyside_jack_polarity); ret = snd_soc_jack_new(codec, "Headset", SND_JACK_HEADSET | SND_JACK_BTN_0, @@ -182,7 +182,7 @@ static int speyside_wm8915_init(struct snd_soc_pcm_runtime *rtd) if (ret) return ret; - wm8915_detect(codec, &speyside_headset, speyside_set_polarity); + wm8996_detect(codec, &speyside_headset, speyside_set_polarity); return 0; } @@ -205,16 +205,16 @@ static struct snd_soc_dai_link speyside_dai[] = { .name = "CPU", .stream_name = "CPU", .cpu_dai_name = "samsung-i2s.0", - .codec_dai_name = "wm8915-aif1", + .codec_dai_name = "wm8996-aif1", .platform_name = "samsung-audio", - .codec_name = "wm8915.1-001a", - .init = speyside_wm8915_init, + .codec_name = "wm8996.1-001a", + .init = speyside_wm8996_init, .ops = &speyside_ops, }, { .name = "Baseband", .stream_name = "Baseband", - 
.cpu_dai_name = "wm8915-aif2", + .cpu_dai_name = "wm8996-aif2", .codec_dai_name = "wm1250-ev1", .codec_name = "wm1250-ev1.1-0027", .ops = &speyside_ops, -- cgit v0.10.2 From 844970916c8e50f630ea1a6ac82f09c42b12660a Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 20 Jul 2011 13:49:58 +0100 Subject: ASoC: Acknowledge WM8996 interrupts before acting on them This closes the small race between a status being read in response to an interrupt and clearing the interrupt, meaning that if the status changes between those periods we might not get a reassertion of the interrupt. Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c index 00f9ace..ab8e9d1 100644 --- a/sound/soc/codecs/wm8996.c +++ b/sound/soc/codecs/wm8996.c @@ -2404,6 +2404,8 @@ static irqreturn_t wm8996_irq(int irq, void *data) } irq_val &= ~snd_soc_read(codec, WM8996_INTERRUPT_STATUS_2_MASK); + snd_soc_write(codec, WM8996_INTERRUPT_STATUS_2, irq_val); + if (irq_val & (WM8996_DCS_DONE_01_EINT | WM8996_DCS_DONE_23_EINT)) { dev_dbg(codec->dev, "DC servo IRQ\n"); complete(&wm8996->dcs_done); @@ -2420,13 +2422,10 @@ static irqreturn_t wm8996_irq(int irq, void *data) if (irq_val & WM8996_MICD_EINT) wm8996_micd(codec); - if (irq_val) { - snd_soc_write(codec, WM8996_INTERRUPT_STATUS_2, irq_val); - + if (irq_val) return IRQ_HANDLED; - } else { + else return IRQ_NONE; - } } static irqreturn_t wm8996_edge_irq(int irq, void *data) -- cgit v0.10.2 From dd23198e58cd35259dd09e8892bbdb90f1d57748 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Sun, 7 Aug 2011 22:31:07 -0700 Subject: ipv4: Fix ip_getsockopt for IP_PKTOPTIONS IP_PKTOPTIONS is broken for 32-bit applications running in COMPAT mode on 64-bit kernels. This happens because msghdr's msg_flags field is always set to zero. When running in COMPAT mode this should be set to MSG_CMSG_COMPAT instead. Signed-off-by: Tiberiu Szocs-Mihai Signed-off-by: Daniel Baluta Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ab0c9ef..8905e92 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1067,7 +1067,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt); */ static int do_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) + char __user *optval, int __user *optlen, unsigned flags) { struct inet_sock *inet = inet_sk(sk); int val; @@ -1240,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, msg.msg_control = optval; msg.msg_controllen = len; - msg.msg_flags = 0; + msg.msg_flags = flags; if (inet->cmsg_flags & IP_CMSG_PKTINFO) { struct in_pktinfo info; @@ -1294,7 +1294,7 @@ int ip_getsockopt(struct sock *sk, int level, { int err; - err = do_ip_getsockopt(sk, level, optname, optval, optlen); + err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && @@ -1327,7 +1327,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, return compat_mc_getsockopt(sk, level, optname, optval, optlen, ip_getsockopt); - err = do_ip_getsockopt(sk, level, optname, optval, optlen); + err = do_ip_getsockopt(sk, level, optname, optval, optlen, + MSG_CMSG_COMPAT); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ -- cgit v0.10.2 From 6602a4baf4d1a73cc4685a39ef859e1c5ddf654c Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sun, 7 Aug 2011 22:48:07 -0700 Subject: net: Make userland include of netlink.h more sane. Currently userland will barf when including linux/netlink.h unless it precisely includes sys/socket.h first. The issue is where the definition of "sa_family_t" comes from. We've been back and forth on how to fix this issue in the past, see: http://thread.gmane.org/gmane.linux.debian.devel.bugs.general/622621 http://thread.gmane.org/gmane.linux.network/143380 Ben Hutchings suggested we take a hint from how we handle the sockaddr_storage type. First we define a "__kernel_sa_family_t" to linux/socket.h that is always defined. Then if __KERNEL__ is defined, we also define "sa_family_t" as equal to "__kernel_sa_family_t". Then in places like linux/netlink.h we use __kernel_sa_family_t in user visible datastructures. Reported-by: Michel Machado Signed-off-by: David S. Miller diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 2e17c5d..180540a 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -29,7 +29,7 @@ #define MAX_LINKS 32 struct sockaddr_nl { - sa_family_t nl_family; /* AF_NETLINK */ + __kernel_sa_family_t nl_family; /* AF_NETLINK */ unsigned short nl_pad; /* zero */ __u32 nl_pid; /* port ID */ __u32 nl_groups; /* multicast groups mask */ diff --git a/include/linux/socket.h b/include/linux/socket.h index e17f822..d0e77f6 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -8,8 +8,10 @@ #define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *)) /* Implementation specific desired alignment */ +typedef unsigned short __kernel_sa_family_t; + struct __kernel_sockaddr_storage { - unsigned short ss_family; /* address family */ + __kernel_sa_family_t ss_family; /* address family */ /* Following field(s) are implementation specific */ char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; /* space to achieve desired size, */ @@ -35,7 +37,7 @@ struct seq_file; extern void socket_seq_show(struct seq_file *seq); #endif -typedef unsigned short sa_family_t; +typedef __kernel_sa_family_t sa_family_t; /* * 1003.1g requires sa_family_t and that sa_data is char. -- cgit v0.10.2 From 797fd3913abf2f7036003ab8d3d019cbea41affd Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 7 Aug 2011 09:11:00 +0000 Subject: netfilter: TCP and raw fix for ip_route_me_harder TCP in some cases uses different global (raw) socket to send RST and ACK. The transparent flag is not set there. Currently, it is a problem for rerouting after the previous change. Fix it by simplifying the checks in ip_route_me_harder and use FLOWI_FLAG_ANYSRC even for sockets. It looks safe because the initial routing allowed this source address to be used and now we just have to make sure the packet is rerouted. As a side effect this also allows rerouting for normal raw sockets that use spoofed source addresses which was not possible even before we eliminated the ip_route_input call. Signed-off-by: Julian Anastasov Signed-off-by: David S. Miller diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 2e97e3e..929b27b 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -18,17 +18,15 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - __u8 flags = 0; + __u8 flags = skb->sk ? 
inet_sk_flowi_flags(skb->sk) : 0; unsigned int hh_len; - if (!skb->sk && addr_type != RTN_LOCAL) { - if (addr_type == RTN_UNSPEC) - addr_type = inet_addr_type(net, saddr); - if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) - flags |= FLOWI_FLAG_ANYSRC; - else - saddr = 0; - } + if (addr_type == RTN_UNSPEC) + addr_type = inet_addr_type(net, saddr); + if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) + flags |= FLOWI_FLAG_ANYSRC; + else + saddr = 0; /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. @@ -38,7 +36,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; fl4.flowi4_mark = skb->mark; - fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags; + fl4.flowi4_flags = flags; rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return -1; -- cgit v0.10.2 From 47670b767b1593433b516df7798df03f858278be Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 7 Aug 2011 09:16:09 +0000 Subject: ipv4: route non-local sources for raw socket Raw sockets can provide a source address for routing, but their privileges are not considered. Since we can provide a non-local source address, make sure the FLOWI_FLAG_ANYSRC flag is set if the socket has privileges for this, i.e. based on the hdrincl (IP_HDRINCL) and transparent flags. Signed-off-by: Julian Anastasov Signed-off-by: David S. Miller diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index caaff5f..b897d6e 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -238,7 +238,7 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) { __u8 flags = 0; - if (inet_sk(sk)->transparent) + if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl) flags |= FLOWI_FLAG_ANYSRC; if (sk->sk_protocol == IPPROTO_TCP) flags |= FLOWI_FLAG_PRECOW_METRICS; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 1457acb..61714bd 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -563,7 +563,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, - FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0); + inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, + daddr, saddr, 0, 0); if (!inet->hdrincl) { err = raw_probe_proto_opt(&fl4, msg); -- cgit v0.10.2 From d52fbfc9e5c7bb0b0dbc256edf17dee170ce839d Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 7 Aug 2011 10:17:22 +0000 Subject: ipv4: use dst with ref during bcast/mcast loopback Make sure the skb dst has a reference when moving to another context. Currently, I don't see protocols that can hit it when sending broadcasts/multicasts to loopback using noref dsts, so it is just a precaution. Signed-off-by: Julian Anastasov Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 77d3ede..8c65633 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -122,6 +122,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) newskb->pkt_type = PACKET_LOOPBACK; newskb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(newskb)); + skb_dst_force(newskb); netif_rx_ni(newskb); return 0; } -- cgit v0.10.2 From c2e2a313ff8fdc25cedef5e63da712a6a0d35dfe Mon Sep 17 00:00:00 2001 From: huajun li Date: Sun, 7 Aug 2011 03:03:31 +0000 Subject: rtl8150: rtl8150_disconnect(...) does not need tasklet_disable(...)
Executing the command 'rmmod rtl8150' does not return (if a device is connected to the host); the root cause is that tasklet_disable() makes tasklet_kill() block, so remove it from rtl8150_disconnect(). Signed-off-by: Huajun Li Signed-off-by: David S. Miller diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 041fb7d..ef3b236 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -977,7 +977,6 @@ static void rtl8150_disconnect(struct usb_interface *intf) usb_set_intfdata(intf, NULL); if (dev) { set_bit(RTL8150_UNPLUG, &dev->flags); - tasklet_disable(&dev->tl); tasklet_kill(&dev->tl); unregister_netdev(dev->netdev); unlink_all_urbs(dev); -- cgit v0.10.2 From cbc056602c7c63620c86904c431ff6b61e029dcc Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sat, 6 Aug 2011 21:03:03 +0000 Subject: gianfar: fix fiper alignment after resetting the time After resetting the time, the PPS signals on the FIPER output channels are incorrectly offset from the clock time, as can be readily verified by looping back the FIPER to the external time stamp input. Despite its name, setting the "Fiper Realignment Disable" bit seems to fix the problem, at least on the P2020. Also, following the example code from the Freescale BSP, it is not really necessary to disable and re-enable the timer in order to reprogram the FIPER. (The documentation is rather unclear on this point. It seems that writing to the alarm register also disables the FIPER.) Signed-off-by: Richard Cochran Cc: Signed-off-by: David S. Miller diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c index 1c97861..f67b8ae 100644 --- a/drivers/net/gianfar_ptp.c +++ b/drivers/net/gianfar_ptp.c @@ -193,14 +193,9 @@ static void set_alarm(struct etsects *etsects) /* Caller must hold etsects->lock. */ static void set_fipers(struct etsects *etsects) { - u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); - - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE)); - gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); + set_alarm(etsects); gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE); } /* @@ -511,7 +506,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE|FRD); spin_unlock_irqrestore(&etsects->lock, flags); -- cgit v0.10.2 From 8028837d71ba9904b17281b40f94b93e947fbe38 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sat, 6 Aug 2011 21:03:04 +0000 Subject: dp83640: increase receive time stamp buffer size The dp83640 buffers receive time stamps from special PHY status frames, matching them to received PTP packets in a work queue. Because the timeout for orphaned time stamps is so long and the buffer is so small, the driver can drop time stamps under moderate PTP traffic. This commit fixes the issue by decreasing the timeout to (at least) one timer tick and increasing the buffer size. Signed-off-by: Richard Cochran Cc: Signed-off-by: David S.
Miller diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 2cd8dc5..cb6e0b4 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -34,8 +34,7 @@ #define PAGESEL 0x13 #define LAYER4 0x02 #define LAYER2 0x01 -#define MAX_RXTS 4 -#define MAX_TXTS 4 +#define MAX_RXTS 64 #define N_EXT_TS 1 #define PSF_PTPVER 2 #define PSF_EVNT 0x4000 @@ -218,7 +217,7 @@ static void phy2rxts(struct phy_rxts *p, struct rxts *rxts) rxts->seqid = p->seqid; rxts->msgtype = (p->msgtype >> 12) & 0xf; rxts->hash = p->msgtype & 0x0fff; - rxts->tmo = jiffies + HZ; + rxts->tmo = jiffies + 2; } static u64 phy2txts(struct phy_txts *p) -- cgit v0.10.2 From 546fb6cbde1d990af3e28e2f6d7061ef4eef25bf Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Thu, 4 Aug 2011 16:47:35 +0100 Subject: ARM: mx5: board-cpuimx51.c fixup irq_to_gpio() usage irq_to_gpio() is being called on a GPIO, so change to using gpio_to_irq() instead. Signed-off-by: Ben Dooks Signed-off-by: Sascha Hauer diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c index 7c893fa..68934ea 100644 --- a/arch/arm/mach-mx5/board-cpuimx51.c +++ b/arch/arm/mach-mx5/board-cpuimx51.c @@ -81,7 +81,7 @@ static struct plat_serial8250_port serial_platform_data[] = { .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, }, { .mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000), - .irq = irq_to_gpio(CPUIMX51_QUARTD_GPIO), + .irq = gpio_to_irq(CPUIMX51_QUARTD_GPIO), .irqflags = IRQF_TRIGGER_HIGH, .uartclk = CPUIMX51_QUART_XTAL, .regshift = CPUIMX51_QUART_REGSHIFT, -- cgit v0.10.2 From d9c927833a42b4eaae4addd031f780f4530f7a2d Mon Sep 17 00:00:00 2001 From: "Arnaud Patard (Rtp)" Date: Fri, 5 Aug 2011 09:32:41 +0200 Subject: iMX: Fix build for iMX53 Commit fad107086d5a869c1c07e5bb35b7b57a10ecf578 fixed the wrong test for MX51, since the MX51 addresses are wrong for MX50 and MX53. But now that the test is MX51-only, UART_PADDR is no longer defined when building for MX50/MX53. Signed-off-by: Arnaud Patard Tested-by: Steev Klimaszewski Signed-off-by: Sascha Hauer diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S index 91fc7cd..e4dde91 100644 --- a/arch/arm/plat-mxc/include/mach/debug-macro.S +++ b/arch/arm/plat-mxc/include/mach/debug-macro.S @@ -44,6 +44,14 @@ #define UART_PADDR MX51_UART1_BASE_ADDR #endif +/* iMX50/53 have same addresses, but not iMX51 */ +#if defined(CONFIG_SOC_IMX50) || defined(CONFIG_SOC_IMX53) +#ifdef UART_PADDR +#error "CONFIG_DEBUG_LL is incompatible with multiple archs" +#endif +#define UART_PADDR MX53_UART1_BASE_ADDR +#endif + #define UART_VADDR IMX_IO_ADDRESS(UART_PADDR) .macro addruart, rp, rv -- cgit v0.10.2 From 0584ffa548b6e59aceb027112f23a55f0133400e Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 8 Aug 2011 12:24:46 +0200 Subject: ALSA: timer - Fix Oops at closing slave timer A slave-timer instance has no timer reference, and this results in a NULL dereference when stopping the timer, which typically happens when closing the device.
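[Editor's note: the failure mode is easy to see in miniature. The sketch below is an illustration with hypothetical names, not kernel code: a slave instance never gets a backing timer, so the stop path must check the pointer before taking its lock.]

	/* Hypothetical miniature of the bug and its fix. */
	struct hw_timer;			/* opaque; owns a lock, etc. */

	struct timer_instance {
		struct hw_timer *timer;		/* stays NULL for slave instances */
	};

	static int instance_stop(struct timer_instance *i)
	{
		struct hw_timer *t = i->timer;

		if (!t)				/* the guard the patch adds */
			return -EINVAL;
		/* ... safe to take t's lock and stop the hardware here ... */
		return 0;
	}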
Reference: https://bugzilla.kernel.org/show_bug.cgi?id=40682 Cc: Signed-off-by: Takashi Iwai diff --git a/sound/core/timer.c b/sound/core/timer.c index 7c1cbf0..950eed0 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -531,6 +531,8 @@ int snd_timer_stop(struct snd_timer_instance *timeri) if (err < 0) return err; timer = timeri->timer; + if (!timer) + return -EINVAL; spin_lock_irqsave(&timer->lock, flags); timeri->cticks = timeri->ticks; timeri->pticks = 0; -- cgit v0.10.2 From 94094c8aaeded11f8b99734b9ebdaada20b5f24a Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 8 Aug 2011 12:28:22 +0200 Subject: ALSA: timer - Add NULL-check for invalid slave timer Just to be sure. Signed-off-by: Takashi Iwai diff --git a/sound/core/timer.c b/sound/core/timer.c index 950eed0..67ebf1c 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -328,6 +328,8 @@ int snd_timer_close(struct snd_timer_instance *timeri) mutex_unlock(&register_mutex); } else { timer = timeri->timer; + if (snd_BUG_ON(!timer)) + goto out; /* wait, until the active callback is finished */ spin_lock_irq(&timer->lock); while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) { @@ -353,6 +355,7 @@ int snd_timer_close(struct snd_timer_instance *timeri) } mutex_unlock(&register_mutex); } + out: if (timeri->private_free) timeri->private_free(timeri); kfree(timeri->owner); -- cgit v0.10.2 From 8039290a91c5dc4414093c086987a5d7738fe2fd Mon Sep 17 00:00:00 2001 From: Wang Shaoyan Date: Mon, 8 Aug 2011 19:10:26 +0800 Subject: sound: pss - don't use the deprecated function check_region sound/oss/pss.c: In function 'configure_nonsound_components': sound/oss/pss.c:676: warning: 'check_region' is deprecated (declared at include/linux/ioport.h:201) Signed-off-by: Wang Shaoyan Signed-off-by: Takashi Iwai diff --git a/sound/oss/pss.c b/sound/oss/pss.c index 9b800ce..2fc0624 100644 --- a/sound/oss/pss.c +++ b/sound/oss/pss.c @@ -673,7 +673,8 @@ static void configure_nonsound_components(void) if (pss_cdrom_port == -1) { /* If cdrom port enablation wasn't requested */ printk(KERN_INFO "PSS: CDROM port not enabled.\n"); - } else if (check_region(pss_cdrom_port, 2)) { + } else if (!request_region(pss_cdrom_port, 2, "PSS CDROM")) { + pss_cdrom_port = -1; printk(KERN_ERR "PSS: CDROM I/O port conflict.\n"); } else { set_io_base(devc, CONF_CDROM, pss_cdrom_port); @@ -1232,7 +1233,8 @@ static void __exit cleanup_pss(void) if(pssmpu) unload_pss_mpu(&cfg_mpu); unload_pss(&cfg); - } + } else if (pss_cdrom_port != -1) + release_region(pss_cdrom_port, 2); if(!pss_keep_settings) /* Keep hardware settings if asked */ { -- cgit v0.10.2 From ce27a443d17dccf613079a7147cf0d220bc4ec82 Mon Sep 17 00:00:00 2001 From: Jovi Zhang Date: Mon, 25 Jul 2011 22:08:08 +0800 Subject: perf probe: Fix coredump introduced by probe module option perf will coredump if the user doesn't give the "-m" option in the probe command; this patch fixes it.
[root@localhost perf]# ./perf probe --add='PROBE' Segmentation fault (core dumped) Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1311602888-2389-1-git-send-email-bookjovi@gmail.com Signed-off-by: Jovi Zhang Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index b82d54f..1c7bfa5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -1820,11 +1820,15 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev, ret = -ENOMEM; goto error; } - tev->point.module = strdup(module); - if (tev->point.module == NULL) { - ret = -ENOMEM; - goto error; + + if (module) { + tev->point.module = strdup(module); + if (tev->point.module == NULL) { + ret = -ENOMEM; + goto error; + } } + tev->point.offset = pev->point.offset; tev->point.retprobe = pev->point.retprobe; tev->nargs = pev->nargs; -- cgit v0.10.2 From cf8dc9ff29b55955197ae6f628b19f7f41f6e582 Mon Sep 17 00:00:00 2001 From: Zhu Yanhai Date: Sat, 30 Jul 2011 22:13:52 +0800 Subject: perf lock: Dropping unsupported ':r' modifier Looks to me like the :r modifier is not supported anymore, so remove it from the list of events. Without this fix 'perf lock record' doesn't work. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Zhu Yanhai Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1312035232-9534-1-git-send-email-gaoyang.zyh@taobao.com Signed-off-by: Zhu Yanhai Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index 9ac05aa..899080a 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -942,10 +942,10 @@ static const char *record_args[] = { "-f", "-m", "1024", "-c", "1", - "-e", "lock:lock_acquire:r", - "-e", "lock:lock_acquired:r", - "-e", "lock:lock_contended:r", - "-e", "lock:lock_release:r", + "-e", "lock:lock_acquire", + "-e", "lock:lock_acquired", + "-e", "lock:lock_contended", + "-e", "lock:lock_release", }; static int __cmd_record(int argc, const char **argv) -- cgit v0.10.2 From 8b7e0b34b8e94f34597e4b804bbb8bb7e27df040 Mon Sep 17 00:00:00 2001 From: Kusanagi Kouichi Date: Sun, 7 Aug 2011 17:39:31 +0900 Subject: perf tools: Make clean leaves some files Use LIB_OBJS and BUILTIN_OBJS for .o files. LIB_FILE is already prefixed with OUTPUT. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110807083932.9C0E514C03B@msa103.auone-net.jp Signed-off-by: Kusanagi Kouichi Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index d0861bb..5bf48fc 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -892,7 +892,7 @@ quick-install-html: ### Cleaning rules clean: - $(RM) $(OUTPUT){*.o,*/*.o,*/*/*.o,*/*/*/*.o,$(LIB_FILE),perf-archive} + $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(RM) $(ALL_PROGRAMS) perf $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(MAKE) -C Documentation/ clean -- cgit v0.10.2 From aba8d056078e47350d85b06a9cabd5afcc4b72ea Mon Sep 17 00:00:00 2001 From: Jonathan Nieder Date: Fri, 5 Aug 2011 18:58:38 +0200 Subject: perf tools: do not look at ./config for configuration In addition to /etc/perfconfig and $HOME/.perfconfig, perf looks for configuration in the file ./config, imitating git which looks at $GIT_DIR/config. 
If ./config is not a perf configuration file, it fails, or worse, treats it as a configuration file and changes behavior in some unexpected way. "config" is not an unusual name for a file to be lying around and perf does not have a private directory dedicated for its own use, so let's just stop looking for configuration in the cwd. Callers needing context-sensitive configuration can use the PERF_CONFIG environment variable. Requested-by: Christian Ohm Cc: 632923@bugs.debian.org Cc: Ben Hutchings Cc: Christian Ohm Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110805165838.GA7237@elie.gateway.2wire.net Signed-off-by: Jonathan Nieder Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index e02d78c..6c86eca 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -399,7 +399,6 @@ static int perf_config_global(void) int perf_config(config_fn_t fn, void *data) { int ret = 0, found = 0; - char *repo_config = NULL; const char *home = NULL; /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */ @@ -421,12 +420,6 @@ int perf_config(config_fn_t fn, void *data) free(user_config); } - repo_config = perf_pathdup("config"); - if (!access(repo_config, R_OK)) { - ret += perf_config_from_file(fn, repo_config, data); - found += 1; - } - free(repo_config); if (found == 0) return -1; return ret; -- cgit v0.10.2 From 37fb3a30b46237f23cfdf7ee09d49f9888dd13bf Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 8 Aug 2011 16:08:08 +0200 Subject: fuse: fix flock Commit a9ff4f87 "fuse: support BSD locking semantics" overlooked a number of issues with supporting flock locks over existing POSIX locking infrastructure: - it's not backward compatible, passing flock(2) calls to userspace unconditionally (if userspace sets FUSE_POSIX_LOCKS) - it doesn't cater for the fact that flock locks are automatically unlocked on file release - it doesn't take into account the fact that flock exclusive locks (write locks) don't need an fd opened for write. The last one invalidates the original premise of the patch that flock locks can be emulated with POSIX locks. This patch fixes the first two issues. The last one needs to be fixed in userspace if the filesystem assumed that a write lock will happen only on a file opened for write (as in the case of the current fuse library).
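[Editor's note: the third point can be checked from plain userspace; the sketch below is an illustration, not part of the patch, and assumes an existing file named 'testfile'. Per POSIX, fcntl() refuses a write lock on a read-only descriptor (EBADF), while flock() grants an exclusive lock regardless of the access mode, which is why flock semantics cannot be built on POSIX locks alone.]

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/file.h>

	int main(void)
	{
		int fd = open("testfile", O_RDONLY);
		struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

		if (fd < 0)
			return 1;
		/* BSD semantics: exclusive lock, no write access required */
		if (flock(fd, LOCK_EX) == 0)
			printf("flock(LOCK_EX) succeeded on an O_RDONLY fd\n");
		/* POSIX semantics: a write lock on a read-only fd fails */
		if (fcntl(fd, F_SETLK, &fl) == -1)
			perror("fcntl(F_SETLK, F_WRLCK)");
		return 0;
	}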
Reported-by: Sebastian Pipping Signed-off-by: Miklos Szeredi diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 82a6646..e327849 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -245,6 +245,12 @@ void fuse_release_common(struct file *file, int opcode) req = ff->reserved_req; fuse_prepare_release(ff, file->f_flags, opcode); + if (ff->flock) { + struct fuse_release_in *inarg = &req->misc.release.in; + inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; + inarg->lock_owner = fuse_lock_owner_id(ff->fc, + (fl_owner_t) file); + } /* Hold vfsmount and dentry until release is finished */ path_get(&file->f_path); req->misc.release.path = file->f_path; @@ -1547,11 +1553,14 @@ static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl) struct fuse_conn *fc = get_fuse_conn(inode); int err; - if (fc->no_lock) { + if (fc->no_flock) { err = flock_lock_file_wait(file, fl); } else { + struct fuse_file *ff = file->private_data; + /* emulate flock with POSIX locks */ fl->fl_owner = (fl_owner_t) file; + ff->flock = true; err = fuse_setlk(file, fl, 1); } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index b788bec..eb8c613 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -135,6 +135,9 @@ struct fuse_file { /** Wait queue head for poll */ wait_queue_head_t poll_wait; + + /** Has flock been performed on this file? */ + bool flock:1; }; /** One input argument of a request */ @@ -448,7 +451,7 @@ struct fuse_conn { /** Is removexattr not implemented by fs? */ unsigned no_removexattr:1; - /** Are file locking primitives not implemented by fs? */ + /** Are posix file locking primitives not implemented by fs? */ unsigned no_lock:1; /** Is access not implemented by fs? */ @@ -472,6 +475,9 @@ struct fuse_conn { /** Don't apply umask to creation modes */ unsigned dont_mask:1; + /** Are BSD file locking primitives not implemented by fs? 
*/ + unsigned no_flock:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 5354906e..f541d63 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -809,6 +809,10 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) fc->no_lock = 1; + if (arg->minor >= 17) { + if (!(arg->flags & FUSE_FLOCK_LOCKS)) + fc->no_flock = 1; + } if (arg->flags & FUSE_ATOMIC_O_TRUNC) fc->atomic_o_trunc = 1; if (arg->minor >= 9) { @@ -823,6 +827,7 @@ } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; + fc->no_flock = 1; } fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages); @@ -843,7 +848,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->minor = FUSE_KERNEL_MINOR_VERSION; arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | - FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK; + FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | + FUSE_FLOCK_LOCKS; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); diff --git a/include/linux/fuse.h b/include/linux/fuse.h index d464de5..464cff5 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -47,6 +47,9 @@ * - FUSE_IOCTL_UNRESTRICTED shall now return with array of 'struct * fuse_ioctl_iovec' instead of ambiguous 'struct iovec' * - add FUSE_IOCTL_32BIT flag + * + * 7.17 + * - add FUSE_FLOCK_LOCKS and FUSE_RELEASE_FLOCK_UNLOCK */ #ifndef _LINUX_FUSE_H @@ -78,7 +81,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 16 +#define FUSE_KERNEL_MINOR_VERSION 17 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -153,8 +156,10 @@ struct fuse_file_lock { /** * INIT request/reply flags * + * FUSE_POSIX_LOCKS: remote locking for POSIX file locks * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." * FUSE_DONT_MASK: don't apply umask to file mode on create operations + * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -163,6 +168,7 @@ struct fuse_file_lock { #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) #define FUSE_DONT_MASK (1 << 6) +#define FUSE_FLOCK_LOCKS (1 << 10) /** * CUSE INIT request/reply flags @@ -175,6 +181,7 @@ struct fuse_file_lock { * Release flags */ #define FUSE_RELEASE_FLUSH (1 << 0) +#define FUSE_RELEASE_FLOCK_UNLOCK (1 << 1) /** * Getattr flags -- cgit v0.10.2 From b40cdd56dfa065c0832905e266b39f79419e6914 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 25 Jul 2011 22:35:34 +0200 Subject: fuse: delete dead .write_begin and .write_end aops Ever since 'ea9b990 fuse: implement perform_write', the .write_begin and .write_end aops have been dead code. Their task - acquiring a page from the page cache, sending out a write request and releasing the page again - is now done batch-wise to maximize the number of pages sent per userspace request.
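The batch-wise path referred to here has roughly the shape below, condensed from the perform_write flow for illustration rather than quoted from the kernel: fill one request with as many consecutive pages as possible, then issue a single FUSE_WRITE for the whole run.

static ssize_t fuse_perform_write_sketch(struct file *file,
					 struct address_space *mapping,
					 struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	ssize_t res = 0;

	while (iov_iter_count(ii)) {
		struct fuse_req *req = fuse_get_req(fc);
		ssize_t count;

		if (IS_ERR(req))
			return res ? res : PTR_ERR(req);

		/* grab pages and copy user data, up to the request's
		 * page capacity */
		count = fuse_fill_write_pages(req, mapping, ii, pos);
		if (count > 0)
			count = fuse_send_write_pages(req, file,
						      mapping->host,
						      pos, count);
		fuse_put_request(fc, req);
		if (count <= 0)
			return res ? res : count;
		pos += count;
		res += count;
	}
	return res;
}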
Signed-off-by: Johannes Weiner Signed-off-by: Miklos Szeredi diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e327849..ab5b84e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -749,18 +749,6 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file, return req->misc.write.out.size; } -static int fuse_write_begin(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -{ - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - - *pagep = grab_cache_page_write_begin(mapping, index, flags); - if (!*pagep) - return -ENOMEM; - return 0; -} - void fuse_write_update_size(struct inode *inode, loff_t pos) { struct fuse_conn *fc = get_fuse_conn(inode); @@ -773,62 +761,6 @@ void fuse_write_update_size(struct inode *inode, loff_t pos) spin_unlock(&fc->lock); } -static int fuse_buffered_write(struct file *file, struct inode *inode, - loff_t pos, unsigned count, struct page *page) -{ - int err; - size_t nres; - struct fuse_conn *fc = get_fuse_conn(inode); - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); - struct fuse_req *req; - - if (is_bad_inode(inode)) - return -EIO; - - /* - * Make sure writepages on the same page are not mixed up with - * plain writes. - */ - fuse_wait_on_page_writeback(inode, page->index); - - req = fuse_get_req(fc); - if (IS_ERR(req)) - return PTR_ERR(req); - - req->in.argpages = 1; - req->num_pages = 1; - req->pages[0] = page; - req->page_offset = offset; - nres = fuse_send_write(req, file, pos, count, NULL); - err = req->out.h.error; - fuse_put_request(fc, req); - if (!err && !nres) - err = -EIO; - if (!err) { - pos += nres; - fuse_write_update_size(inode, pos); - if (count == PAGE_CACHE_SIZE) - SetPageUptodate(page); - } - fuse_invalidate_attr(inode); - return err ? err : nres; -} - -static int fuse_write_end(struct file *file, struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata) -{ - struct inode *inode = mapping->host; - int res = 0; - - if (copied) - res = fuse_buffered_write(file, inode, pos, copied, page); - - unlock_page(page); - page_cache_release(page); - return res; -} - static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, struct inode *inode, loff_t pos, size_t count) @@ -2181,8 +2113,6 @@ static const struct address_space_operations fuse_file_aops = { .readpage = fuse_readpage, .writepage = fuse_writepage, .launder_page = fuse_launder_page, - .write_begin = fuse_write_begin, - .write_end = fuse_write_end, .readpages = fuse_readpages, .set_page_dirty = __set_page_dirty_nobuffers, .bmap = fuse_bmap, -- cgit v0.10.2 From 478e0841b3dce3edc2c67bf0fc51af30f582e9e2 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Mon, 25 Jul 2011 22:35:35 +0200 Subject: fuse: mark pages accessed when written to As fuse does not use the page cache library functions when userspace writes to a file, it did not benefit from 'c8236db mm: mark page accessed before we write_end()' that made sure pages are properly marked accessed when written to. 
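In page-cache terms, the per-page copy step now does roughly the following (an illustrative sketch assuming the usual LRU rules, not the verbatim fuse_fill_write_pages()); without the mark_page_accessed() call, pages written this way are never promoted on the LRU and look cold to reclaim:

static void fuse_fill_one_page_sketch(struct page *page, struct iov_iter *ii,
				      unsigned offset, unsigned bytes)
{
	/* copy the userspace data into the cached page */
	iov_iter_copy_from_user(page, ii, offset, bytes);
	flush_dcache_page(page);
	/* the fix: record the use, as generic_perform_write() does for
	 * filesystems that go through .write_begin/.write_end */
	mark_page_accessed(page);
}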
Signed-off-by: Johannes Weiner Signed-off-by: Miklos Szeredi diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ab5b84e..7155f49 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -14,6 +14,7 @@ #include #include #include +#include static const struct file_operations fuse_direct_io_file_operations; @@ -834,6 +835,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, pagefault_enable(); flush_dcache_page(page); + mark_page_accessed(page); + if (!tmp) { unlock_page(page); page_cache_release(page); -- cgit v0.10.2 From 9941c96ad869d10f7e34e03990ce450ab8fcb83d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 22 Jul 2011 13:33:07 +0200 Subject: perf tools: Add support to install perf python extension Adding install-python_ext target to install python extension related files. Installation directory is governed by python distutils package and follows the DESTDIR variable settings. Also moving python extension build output into '$(O)python_ext_build' directory and making it configurable via PYTHON_EXTBUILD variable. Keeping the '$(O)python/perf.so' file, so it could be used for testing as of until now. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110722113307.GA1931@jolsa.brq.redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 5bf48fc..822f967 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -178,9 +178,9 @@ strip-libs = $(filter-out -l%,$(1)) $(OUTPUT)python/perf.so: $(PYRF_OBJS) $(QUIET_GEN)CFLAGS='$(BASIC_CFLAGS)' $(PYTHON_WORD) util/setup.py \ - --quiet build_ext \ - --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' + --quiet build_ext; \ + mkdir -p $(OUTPUT)python && \ + cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/ # # No Perl scripts right now: # @@ -506,9 +506,13 @@ else PYTHON_WORD := $(call shell-wordify,$(PYTHON)) - python-clean := $(PYTHON_WORD) util/setup.py clean \ - --build-lib='$(OUTPUT)python' \ - --build-temp='$(OUTPUT)python/temp' + # python extension build directories + PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/ + PYTHON_EXTBUILD_LIB := $(PYTHON_EXTBUILD)lib/ + PYTHON_EXTBUILD_TMP := $(PYTHON_EXTBUILD)tmp/ + export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP + + python-clean := rm -rf $(PYTHON_EXTBUILD) $(OUTPUT)python/perf.so ifdef NO_LIBPYTHON $(call disable-python) @@ -865,6 +869,9 @@ install: all $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' +install-python_ext: + $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' + install-doc: $(MAKE) -C Documentation install diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index bbc982f..95d3700 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -3,9 +3,27 @@ from distutils.core import setup, Extension from os import getenv +from distutils.command.build_ext import build_ext as _build_ext +from distutils.command.install_lib import install_lib as _install_lib + +class build_ext(_build_ext): + def finalize_options(self): + _build_ext.finalize_options(self) + self.build_lib = build_lib + self.build_temp = build_tmp + +class install_lib(_install_lib): + def finalize_options(self): + _install_lib.finalize_options(self) + self.build_dir = build_lib + + cflags = ['-fno-strict-aliasing', '-Wno-write-strings'] cflags += getenv('CFLAGS', '').split() +build_lib = 
getenv('PYTHON_EXTBUILD_LIB') +build_tmp = getenv('PYTHON_EXTBUILD_TMP') + perf = Extension('perf', sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c', 'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c', @@ -21,4 +39,5 @@ setup(name='perf', author_email='acme@redhat.com', license='GPLv2', url='http://perf.wiki.kernel.org', - ext_modules=[perf]) + ext_modules=[perf], + cmdclass={'build_ext': build_ext, 'install_lib': install_lib}) -- cgit v0.10.2 From 27e4e4362756a78b15e83ef104c8bbe257f40f90 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 8 Aug 2011 15:54:53 +0100 Subject: CRED: Restore const to current_cred() Commit 3295514841c2 ("fix rcu annotations noise in cred.h") accidentally dropped the const of current->cred inside current_cred() by the insertion of a cast to deal with an RCU annotation loss warning from sparse. Use an appropriate RCU wrapper instead so as not to lose the const. Signed-off-by: David Howells Reviewed-by: Paul E. McKenney cc: Al Viro Signed-off-by: Linus Torvalds diff --git a/include/linux/cred.h b/include/linux/cred.h index 98f46ef..8e2fd44 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -269,7 +269,7 @@ static inline void put_cred(const struct cred *_cred) * since nobody else can modify it. */ #define current_cred() \ - (*(__force struct cred **)&current->cred) + rcu_dereference_protected(current->cred, 1) /** * __task_cred - Access a task's objective credentials -- cgit v0.10.2 From 638a8439096c582bdb523fcea9d875d3e1fed38a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 8 Aug 2011 11:33:23 -0700 Subject: cred: use 'const' in get_current_{user,groups} Avoid annoying warnings from these functions ("discards qualifiers") because they assign 'current_cred()' to a non-const pointer. Signed-off-by: Linus Torvalds diff --git a/include/linux/cred.h b/include/linux/cred.h index 8e2fd44..4030896 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -307,7 +307,7 @@ static inline void put_cred(const struct cred *_cred) #define get_current_user() \ ({ \ struct user_struct *__u; \ - struct cred *__cred; \ + const struct cred *__cred; \ __cred = current_cred(); \ __u = get_uid(__cred->user); \ __u; \ @@ -322,7 +322,7 @@ static inline void put_cred(const struct cred *_cred) #define get_current_groups() \ ({ \ struct group_info *__groups; \ - struct cred *__cred; \ + const struct cred *__cred; \ __cred = current_cred(); \ __groups = get_group_info(__cred->group_info); \ __groups; \ -- cgit v0.10.2 From c3ad996246dc5fd6e3df473c5fc1ba6d53e1d402 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 8 Aug 2011 11:35:17 -0700 Subject: autofs4: clean up autofs use of debug/info/warning printouts Use 'pr_debug()' for DPRINTK, which will do the proper type checking on the arguments (without generating code) even when DEBUG isn't #defined. Also, use the standard __VA_ARGS__ for the macros, and stop the pointless abuse of 'do { xyz } while (0)' when the macro is already a perfectly well-formed single statement. Reported-by: David Howells Suggested-by: Joe Perches Cc: Ian Kent Signed-off-by: Linus Torvalds diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 475f9c5..326dc08 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -39,27 +39,17 @@ /* #define DEBUG */ -#ifdef DEBUG -#define DPRINTK(fmt, args...) \ -do { \ - printk(KERN_DEBUG "pid %d: %s: " fmt "\n", \ - current->pid, __func__, ##args); \ -} while (0) -#else -#define DPRINTK(fmt, args...) do {} while (0) -#endif - -#define AUTOFS_WARN(fmt, args...)
\ -do { \ +#define DPRINTK(fmt, ...) \ + pr_debug("pid %d: %s: " fmt "\n", \ + current->pid, __func__, ##__VA_ARGS__) + +#define AUTOFS_WARN(fmt, ...) \ printk(KERN_WARNING "pid %d: %s: " fmt "\n", \ - current->pid, __func__, ##args); \ -} while (0) + current->pid, __func__, ##__VA_ARGS__) -#define AUTOFS_ERROR(fmt, args...) \ -do { \ +#define AUTOFS_ERROR(fmt, ...) \ printk(KERN_ERR "pid %d: %s: " fmt "\n", \ - current->pid, __func__, ##args); \ -} while (0) + current->pid, __func__, ##__VA_ARGS__) /* Unified info structure. This is pointed to by both the dentry and inode structures. Each file in the filesystem has an instance of this -- cgit v0.10.2 From 2f84dd70916ccadd25e94d28363182a978f569b6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 8 Aug 2011 11:55:20 -0700 Subject: autofs4: fix debug printk warning uncovered by cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous commit made the autofs4 debug printouts check types against the printout format, and uncovered this bug: fs/autofs4/waitq.c:106:2: warning: format ‘%08lx’ expects type ‘long unsigned int’, but argument 4 has type ‘autofs_wqt_t’ which is due to the insane type for wait_queue_token. That thing should be some fixed well-defined size (preferably just 'unsigned int' or 'u32') but for unexplained reasons it is randomly either 'unsigned long' or 'unsigned int' depending on the architecture. For now, cast it to 'unsigned long' for printing, the way we do elsewhere. Somebody else can try to explain the typedef mess. (There's a reason we don't support excessive use of typedefs in the kernel: it's usually just a good way of confusing yourself). Signed-off-by: Linus Torvalds diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 2543598..e1fbdee 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, size_t pktsz; DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d", - wq->wait_queue_token, wq->name.len, wq->name.name, type); + (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); memset(&pkt,0,sizeof pkt); /* For security reasons */ -- cgit v0.10.2 From f704648281831fbb8a4ca1acbe18cb84bc0267c8 Mon Sep 17 00:00:00 2001 From: wwang Date: Wed, 3 Aug 2011 16:00:25 +0800 Subject: staging:rts_pstor: fix thread synchronization flow Use different completion variables to synchronize the different kernel threads. This patch fixes a bug that may cause a memory leak when the driver is disconnected. This is not a very urgent bug: with the default settings, the driver's disconnect routine won't be called except when Linux is shut down. But if the option auto_delink_en is set, a small amount of memory would leak after a memory card is unplugged. Signed-off-by: wwang Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c index 5ff59f2..16c73fb 100644 --- a/drivers/staging/rts_pstor/rtsx.c +++ b/drivers/staging/rts_pstor/rtsx.c @@ -66,12 +66,6 @@ static int msi_en; module_param(msi_en, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(msi_en, "enable msi"); -/* These are used to make sure the module doesn't unload before all the - * threads have exited.
- */ -static atomic_t total_threads = ATOMIC_INIT(0); -static DECLARE_COMPLETION(threads_gone); - static irqreturn_t rtsx_interrupt(int irq, void *dev_id); /*********************************************************************** @@ -192,7 +186,7 @@ static int queuecommand_lck(struct scsi_cmnd *srb, /* enqueue the command and wake up the control thread */ srb->scsi_done = done; chip->srb = srb; - up(&(dev->sema)); + complete(&dev->cmnd_ready); return 0; } @@ -475,7 +469,7 @@ static int rtsx_control_thread(void *__dev) current->flags |= PF_NOFREEZE; for (;;) { - if (down_interruptible(&dev->sema)) + if (wait_for_completion_interruptible(&dev->cmnd_ready)) break; /* lock the device pointers */ @@ -557,8 +551,6 @@ SkipForAbort: mutex_unlock(&dev->dev_mutex); } /* for (;;) */ - scsi_host_put(host); - /* notify the exit routine that we're actually exiting now * * complete()/wait_for_completion() is similar to up()/down(), @@ -573,7 +565,7 @@ SkipForAbort: * This is important in preemption kernels, which transfer the flow * of execution immediately upon a complete(). */ - complete_and_exit(&threads_gone, 0); + complete_and_exit(&dev->control_exit, 0); } @@ -581,7 +573,6 @@ static int rtsx_polling_thread(void *__dev) { struct rtsx_dev *dev = (struct rtsx_dev *)__dev; struct rtsx_chip *chip = dev->chip; - struct Scsi_Host *host = rtsx_to_host(dev); struct sd_info *sd_card = &(chip->sd_card); struct xd_info *xd_card = &(chip->xd_card); struct ms_info *ms_card = &(chip->ms_card); @@ -621,8 +612,7 @@ static int rtsx_polling_thread(void *__dev) mutex_unlock(&dev->dev_mutex); } - scsi_host_put(host); - complete_and_exit(&threads_gone, 0); + complete_and_exit(&dev->polling_exit, 0); } /* @@ -699,29 +689,38 @@ static void rtsx_release_resources(struct rtsx_dev *dev) { printk(KERN_INFO "-- %s\n", __func__); + /* Tell the control thread to exit. The SCSI host must + * already have been removed so it won't try to queue + * any more commands. + */ + printk(KERN_INFO "-- sending exit command to thread\n"); + complete(&dev->cmnd_ready); + if (dev->ctl_thread) + wait_for_completion(&dev->control_exit); + if (dev->polling_thread) + wait_for_completion(&dev->polling_exit); + + wait_timeout(200); + if (dev->rtsx_resv_buf) { - dma_free_coherent(&(dev->pci->dev), HOST_CMDS_BUF_LEN, + dma_free_coherent(&(dev->pci->dev), RTSX_RESV_BUF_LEN, dev->rtsx_resv_buf, dev->rtsx_resv_buf_addr); dev->chip->host_cmds_ptr = NULL; dev->chip->host_sg_tbl_ptr = NULL; } - pci_disable_device(dev->pci); - pci_release_regions(dev->pci); - - if (dev->irq > 0) { + if (dev->irq > 0) free_irq(dev->irq, (void *)dev); - } - if (dev->chip->msi_en) { + if (dev->chip->msi_en) pci_disable_msi(dev->pci); - } + if (dev->remap_addr) + iounmap(dev->remap_addr); - /* Tell the control thread to exit. The SCSI host must - * already have been removed so it won't try to queue - * any more commands. - */ - printk(KERN_INFO "-- sending exit command to thread\n"); - up(&dev->sema); + pci_disable_device(dev->pci); + pci_release_regions(dev->pci); + + rtsx_release_chip(dev->chip); + kfree(dev->chip); } /* First stage of disconnect processing: stop all commands and remove @@ -739,6 +738,7 @@ static void quiesce_and_remove_host(struct rtsx_dev *dev) scsi_unlock(host); mutex_unlock(&dev->dev_mutex); wake_up(&dev->delay_wait); + wait_for_completion(&dev->scanning_done); /* Wait some time to let other threads exist */ wait_timeout(100); @@ -793,8 +793,7 @@ static int rtsx_scan_thread(void *__dev) /* Should we unbind if no devices were detected? 
*/ } - scsi_host_put(rtsx_to_host(dev)); - complete_and_exit(&threads_gone, 0); + complete_and_exit(&dev->scanning_done, 0); } static void rtsx_init_options(struct rtsx_chip *chip) @@ -941,8 +940,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id spin_lock_init(&dev->reg_lock); mutex_init(&(dev->dev_mutex)); - sema_init(&(dev->sema), 0); + init_completion(&dev->cmnd_ready); + init_completion(&dev->control_exit); + init_completion(&dev->polling_exit); init_completion(&(dev->notify)); + init_completion(&dev->scanning_done); init_waitqueue_head(&dev->delay_wait); dev->pci = pci; @@ -992,28 +994,22 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id pci_set_master(pci); synchronize_irq(dev->irq); - err = scsi_add_host(host, &pci->dev); - if (err) { - printk(KERN_ERR "Unable to add the scsi host\n"); - goto errout; - } - rtsx_init_chip(dev->chip); /* Start up our control thread */ - th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME); + th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start control thread\n"); err = PTR_ERR(th); goto errout; } + dev->ctl_thread = th; - /* Take a reference to the host for the control thread and - * count it among all the threads we have launched. Then - * start it up. */ - scsi_host_get(rtsx_to_host(dev)); - atomic_inc(&total_threads); - wake_up_process(th); + err = scsi_add_host(host, &pci->dev); + if (err) { + printk(KERN_ERR "Unable to add the scsi host\n"); + goto errout; + } /* Start up the thread for delayed SCSI-device scanning */ th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan"); @@ -1024,28 +1020,17 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id goto errout; } - /* Take a reference to the host for the scanning thread and - * count it among all the threads we have launched. Then - * start it up. */ - scsi_host_get(rtsx_to_host(dev)); - atomic_inc(&total_threads); wake_up_process(th); /* Start up the thread for polling thread */ - th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling"); + th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling"); if (IS_ERR(th)) { printk(KERN_ERR "Unable to start the device-polling thread\n"); quiesce_and_remove_host(dev); err = PTR_ERR(th); goto errout; } - - /* Take a reference to the host for the polling thread and - * count it among all the threads we have launched. Then - * start it up. */ - scsi_host_get(rtsx_to_host(dev)); - atomic_inc(&total_threads); - wake_up_process(th); + dev->polling_thread = th; pci_set_drvdata(pci, dev); @@ -1108,16 +1093,6 @@ static void __exit rtsx_exit(void) pci_unregister_driver(&driver); - /* Don't return until all of our control and scanning threads - * have exited. Since each thread signals threads_gone as its - * last act, we have to call wait_for_completion the right number - * of times. 
- */ - while (atomic_read(&total_threads) > 0) { - wait_for_completion(&threads_gone); - atomic_dec(&total_threads); - } - printk(KERN_INFO "%s module exit\n", CR_DRIVER_NAME); } diff --git a/drivers/staging/rts_pstor/rtsx.h b/drivers/staging/rts_pstor/rtsx.h index 247615b..86e47c2 100644 --- a/drivers/staging/rts_pstor/rtsx.h +++ b/drivers/staging/rts_pstor/rtsx.h @@ -112,9 +112,16 @@ struct rtsx_dev { /* locks */ spinlock_t reg_lock; + struct task_struct *ctl_thread; /* the control thread */ + struct task_struct *polling_thread; /* the polling thread */ + /* mutual exclusion and synchronization structures */ - struct semaphore sema; /* to sleep thread on */ + struct completion cmnd_ready; /* to sleep thread on */ + struct completion control_exit; /* control thread exit */ + struct completion polling_exit; /* polling thread exit */ struct completion notify; /* thread begin/end */ + struct completion scanning_done; /* wait for scan thread */ + wait_queue_head_t delay_wait; /* wait during scan, reset */ struct mutex dev_mutex; -- cgit v0.10.2 From 3ca15c4486beb113700cda44e32109cf0d97528b Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Thu, 4 Aug 2011 19:00:33 -0300 Subject: zcache: Use div_u64 for 64-bit division xv_get_total_size_bytes returns a u64 value and it's used in a division. This causes build failures in 32-bit architectures, as reported by Randy Dunlap. Reported-by: Randy Dunlap Signed-off-by: Thadeu Lima de Souza Cascardo Cc: Stephen Rothwell Cc: Dan Magenheimer Cc: Nitin Gupta Acked-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 66469ac..2c41c44 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "tmem.h" #include "../zram/xvmalloc.h" /* if built in drivers/staging */ @@ -1162,6 +1163,7 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, uint16_t client_id = get_client_id_from_client(cli); unsigned long zv_mean_zsize; unsigned long curr_pers_pampd_count; + u64 total_zsize; if (eph) { ret = zcache_compress(page, &cdata, &clen); @@ -1194,8 +1196,9 @@ static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph, } /* reject if mean compression is too poor */ if ((clen > zv_max_mean_zsize) && (curr_pers_pampd_count > 0)) { - zv_mean_zsize = xv_get_total_size_bytes(cli->xvpool) / - curr_pers_pampd_count; + total_zsize = xv_get_total_size_bytes(cli->xvpool); + zv_mean_zsize = div_u64(total_zsize, + curr_pers_pampd_count); if (zv_mean_zsize > zv_max_mean_zsize) { zcache_mean_compress_poor++; goto out; -- cgit v0.10.2 From d8c778fdf2bcd42e495ca1ca67bca729639352b7 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Thu, 4 Aug 2011 15:05:24 -0700 Subject: zcache: Fix build error when sysfs is not defined Signed-off-by: Nitin Gupta Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index 2c41c44..855a5bb 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c @@ -1936,9 +1936,9 @@ __setup("nofrontswap", no_frontswap); static int __init zcache_init(void) { -#ifdef CONFIG_SYSFS int ret = 0; +#ifdef CONFIG_SYSFS ret = sysfs_create_group(mm_kobj, &zcache_attr_group); if (ret) { pr_err("zcache: can't create sysfs\n"); -- cgit v0.10.2 From 5c723ba5b7886909b2e430f2eae454c33f7fe5c6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra 
Date: Wed, 27 Jul 2011 12:17:11 +0200 Subject: mm: Fix fixup_user_fault() for MMU=n In commit 2efaca927f5c ("mm/futex: fix futex writes on archs with SW tracking of dirty & young") we forgot about MMU=n. This patch fixes that. Signed-off-by: Peter Zijlstra Acked-by: Benjamin Herrenschmidt Acked-by: David Howells Link: http://lkml.kernel.org/r/1311761831.24752.413.camel@twins Signed-off-by: Linus Torvalds diff --git a/include/linux/mm.h b/include/linux/mm.h index f2690cf..fd599f4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -962,6 +962,8 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); +extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + unsigned long address, unsigned int fault_flags); #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, @@ -971,6 +973,14 @@ static inline int handle_mm_fault(struct mm_struct *mm, BUG(); return VM_FAULT_SIGBUS; } +static inline int fixup_user_fault(struct task_struct *tsk, + struct mm_struct *mm, unsigned long address, + unsigned int fault_flags) +{ + /* should never happen if there's no MMU */ + BUG(); + return -EFAULT; +} #endif extern int make_pages_present(unsigned long addr, unsigned long end); @@ -988,8 +998,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); -extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, - unsigned long address, unsigned int fault_flags); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); extern void do_invalidatepage(struct page *page, unsigned long offset); -- cgit v0.10.2 From d84d66153b89b267dd6d88defd0932e21fa84b6a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 8 Aug 2011 11:36:51 +0200 Subject: usb: gadget: net2272 - Correct includes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit <linux/irq.h> states: * Please do not include this file in generic code. There is currently * no requirement for any architecture to implement anything held * within this file.
prefetch() and prefetchw() need <linux/prefetch.h> on m68k: drivers/usb/gadget/net2272.c: In function ‘net2272_write_fifo’: drivers/usb/gadget/net2272.c:468: error: implicit declaration of function ‘prefetch’ drivers/usb/gadget/net2272.c: In function ‘net2272_read_fifo’: drivers/usb/gadget/net2272.c:574: error: implicit declaration of function ‘prefetchw’ Signed-off-by: Geert Uytterhoeven Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c index 7c7b0e1..ab98ea9 100644 --- a/drivers/usb/gadget/net2272.c +++ b/drivers/usb/gadget/net2272.c @@ -27,13 +27,13 @@ #include #include #include -#include <linux/irq.h> #include #include #include #include #include #include +#include <linux/prefetch.h> #include #include #include -- cgit v0.10.2 From d128a259ea4ef7cab39f14b681ee466a7dc6153c Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 3 Aug 2011 21:41:26 -0700 Subject: usb: renesas_usbhs: fix DMA build by including dma-mapping.h Include dma-mapping.h to fix the build of the renesas_usbhs driver: CC drivers/usb/renesas_usbhs/mod_gadget.o drivers/usb/renesas_usbhs/mod_gadget.c: In function 'usbhsg_dma_map': drivers/usb/renesas_usbhs/mod_gadget.c:190: error: implicit declaration of function 'dma_map_single' drivers/usb/renesas_usbhs/mod_gadget.c:192: error: implicit declaration of function 'dma_sync_single_for_device' drivers/usb/renesas_usbhs/mod_gadget.c:196: error: implicit declaration of function 'dma_mapping_error' drivers/usb/renesas_usbhs/mod_gadget.c: In function 'usbhsg_dma_unmap': drivers/usb/renesas_usbhs/mod_gadget.c:217: error: implicit declaration of function 'dma_unmap_single' drivers/usb/renesas_usbhs/mod_gadget.c:219: error: implicit declaration of function 'dma_sync_single_for_cpu' make[5]: *** [drivers/usb/renesas_usbhs/mod_gadget.o] Error 1 make[4]: *** [drivers/usb/renesas_usbhs] Error 2 Reported-by: Magnus Damm Signed-off-by: Kuninori Morimoto Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index ba79dbf..e7101dc 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -14,6 +14,7 @@ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ +#include <linux/dma-mapping.h> #include #include #include -- cgit v0.10.2 From e94c587e78811d95a5b0db094f984eeb99f86388 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 12 Jul 2011 22:01:29 -0700 Subject: usb: renesas_usbhs: fixup usbhsg_for_each_uep 1st pos The 1st pos of __usbhsg_for_each_uep() was wrong. The expected ueps were ep1, ep2, ep3, ..., but each uep was actually ep0, ep2, ep3, ... This patch modifies it. Signed-off-by: Kuninori Morimoto Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index e7101dc..cb2d451 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -77,7 +77,7 @@ struct usbhsg_recip_handle { struct usbhsg_gpriv, mod) #define __usbhsg_for_each_uep(start, pos, g, i) \ - for (i = start, pos = (g)->uep; \ + for (i = start, pos = (g)->uep + i; \ i < (g)->uep_size; \ i++, pos = (g)->uep + i) -- cgit v0.10.2 From 4f1a7a3e78037721496283ea3e87cfefc64d99c7 Mon Sep 17 00:00:00 2001 From: Maxim Nikulin Date: Sat, 9 Jul 2011 23:44:44 +0700 Subject: USB: assign instead of equal in usbtmc.c An assignment operator was used instead of an equality test in the usbtmc_ioctl_abort_bulk_in() function. Signed-off-by: Maxim A.
Nikulin Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 385acb8..3f94ac3 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -268,7 +268,7 @@ usbtmc_abort_bulk_in_status: dev_err(dev, "usb_bulk_msg returned %d\n", rv); goto exit; } - } while ((actual = max_size) && + } while ((actual == max_size) && (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN)); if (actual == max_size) { -- cgit v0.10.2 From 03a1d6bf40a273f5a80029f65f9c673308128512 Mon Sep 17 00:00:00 2001 From: Shawn Guo Date: Thu, 21 Jul 2011 22:24:10 +0800 Subject: usb/ehci-mxc: add missing inclusion of mach/hardware.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As cpu_is_mx stuff is being used in the driver, header mach/hardware.h should be explicitly included. The missing of the header is causing today's linux-next build error as bleow. CC drivers/usb/host/ehci-hcd.o In file included from linux-next/drivers/usb/host/ehci-hcd.c:1190:0: linux-next/drivers/usb/host/ehci-mxc.c: In function 'ehci_mxc_drv_probe': linux-next/drivers/usb/host/ehci-mxc.c:175:2: error: implicit declaration of function 'cpu_is_mx35' linux-next/drivers/usb/host/ehci-mxc.c:175:2: error: implicit declaration of function 'cpu_is_mx25' linux-next/drivers/usb/host/ehci-mxc.c:185:2: error: implicit declaration of function 'cpu_is_mx51' Signed-off-by: Shawn Guo Acked-by: Uwe Kleine-König Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index 0c058be..555a73c 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -24,6 +24,7 @@ #include #include +#include #include #include -- cgit v0.10.2 From a871e4f5519d8c52430052e1d340dd5710eb5ad6 Mon Sep 17 00:00:00 2001 From: Nick Bowler Date: Wed, 13 Jul 2011 11:40:09 -0400 Subject: USB: usb-storage: unusual_devs entry for ARM V2M motherboard. Connecting the V2M to a Linux host results in a constant stream of errors spammed to the console, all of the form sd 1:0:0:0: ioctl_internal_command return code = 8070000 : Sense Key : 0x4 [current] : ASC=0x0 ASCQ=0x0 The errors appear to be otherwise harmless. Add an unusual_devs entry which eliminates all of the error messages. Signed-off-by: Nick Bowler Acked-by: Alan Stern Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ccff348..3041a97 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1988,6 +1988,16 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100, "Micro Mini 1GB", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ), +/* + * Nick Bowler + * SCSI stack spams (otherwise harmless) error messages. 
+ */ +UNUSUAL_DEV( 0xc251, 0x4003, 0x0100, 0x0100, "Keil Software, Inc.", "V2M MotherBoard", USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE), + /* Reported by Andrew Simmons */ UNUSUAL_DEV( 0xed06, 0x4500, 0x0001, 0x0001, "DataStor", -- cgit v0.10.2 From eed393667b15a8f42fc0cdf5e8fee49f77c79aae Mon Sep 17 00:00:00 2001 From: Jean-Christophe PLAGNIOL-VILLARD Date: Sun, 29 May 2011 10:01:48 +0200 Subject: USB: at91_udc: include linux/prefetch.h explicitly Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD Cc: Nicolas Ferre Cc: Patrice Vilchez Cc: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 98cbc06..ddb118a 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -35,6 +35,7 @@ #include #include #include +#include <linux/prefetch.h> #include #include #include -- cgit v0.10.2 From 1862cdd542025218f7a390b7e6ddc83a1362d1e0 Mon Sep 17 00:00:00 2001 From: Ionut Nicu Date: Mon, 11 Jul 2011 16:46:12 +0300 Subject: USB: ftdi_sio: fix minor typo in get_ftdi_divisor Even if it's unlikely for this to cause an error, there is a typo in the code that uses the bitwise-AND operator instead of the logical one. Signed-off-by: Ionut Nicu Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 2e06b90..9afb361 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1171,7 +1171,7 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty, case FT2232H: /* FT2232H chip */ case FT4232H: /* FT4232H chip */ case FT232H: /* FT232H chip */ - if ((baud <= 12000000) & (baud >= 1200)) { + if ((baud <= 12000000) && (baud >= 1200)) { div_value = ftdi_2232h_baud_to_divisor(baud); } else if (baud < 1200) { div_value = ftdi_232bm_baud_to_divisor(baud); -- cgit v0.10.2 From 77636c86a600b83de01719efad83567e46d7e8ce Mon Sep 17 00:00:00 2001 From: Boris Todorov Date: Mon, 11 Jul 2011 12:03:33 +0300 Subject: USB: EHCI: Fix test mode sequence The sequence to put a port into test mode is not complete. According to the EHCI specification, all enabled ports must be put into suspend.
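The ftdi_sio one-liner a few patches above is a classic pitfall worth spelling out: with operands that are already 0 or 1, bitwise & computes the same value as logical &&, which is why such typos survive testing; the two only diverge for general integers (and & does not short-circuit). A small user-space demonstration:

#include <assert.h>

int main(void)
{
	int baud = 9600;

	/* comparison results are 0/1, so & and && happen to agree here */
	assert(((baud <= 12000000) & (baud >= 1200)) ==
	       ((baud <= 12000000) && (baud >= 1200)));

	/* ...but not for general integers */
	assert((4 & 2) == 0);	/* bitwise: 100 & 010 == 000 */
	assert((4 && 2) == 1);	/* logical: both operands non-zero */
	return 0;
}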
Signed-off-by: Boris Todorov Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index bf2c8f6..e051b30 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -1046,7 +1046,19 @@ static int ehci_hub_control ( if (!selector || selector > 5) goto error; ehci_quiesce(ehci); + + /* Put all enabled ports into suspend */ + while (ports--) { + u32 __iomem *sreg = + &ehci->regs->port_status[ports]; + + temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS; + if (temp & PORT_PE) + ehci_writel(ehci, temp | PORT_SUSPEND, + sreg); + } ehci_halt(ehci); + temp = ehci_readl(ehci, status_reg); temp |= selector << 16; ehci_writel(ehci, temp, status_reg); break; -- cgit v0.10.2 From 17d3e145a4ad680b3d1b1c30d0696a5bbb2b65c4 Mon Sep 17 00:00:00 2001 From: Arvid Brodin Date: Wed, 20 Jul 2011 03:13:46 +0200 Subject: usb/isp1760: Added missing call to usb_hcd_check_unlink_urb() during unlink Signed-off-by: Arvid Brodin Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 55d3d58..840beda 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -1583,6 +1583,9 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int retval = 0; spin_lock_irqsave(&priv->lock, spinflags); + retval = usb_hcd_check_unlink_urb(hcd, urb, status); + if (retval) + goto out; qh = urb->ep->hcpriv; if (!qh) { -- cgit v0.10.2 From 0ee404ccf1c63aa25363940d474c3c03f8d7e882 Mon Sep 17 00:00:00 2001 From: Uwe Bonnes Date: Fri, 22 Jul 2011 10:41:15 +0200 Subject: usb: serial: ftdi_sio.c: For the FT232H FTDI_SIO_SET_BAUDRATE_REQUEST, index needs to be shifted too The recent addition of the FT232H showed that the baud rate was set wrongly. See gmane.linux.usb.general: "[ftdi_sio] FT232H support". With the old code, the MSB of the 4 encoded fractional divider bits and, more importantly, the clock predivider bits got lost. Adding the FT232H to the code path where these bits are shifted solves the problem. I verified baud rates with a scope now. I suspect that the BM device probably needs these bits shifted too. But there is no predivider bit, so this is not obvious, and a missing MSB of the encoded fractional divider only shifts the resulting baud rate minimally. The AM has only 3 bits of encoded fractional divider, so it is not impacted. I have no BM device to test, so I only added a comment and left the code for the BM untouched. Signed-off-by: Uwe Bonnes Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 9afb361..7d42f61 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1205,7 +1205,10 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) urb_index_value = get_ftdi_divisor(tty, port); urb_value = (__u16)urb_index_value; urb_index = (__u16)(urb_index_value >> 16); - if (priv->interface) { /* FT2232C */ + if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) || + (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) { + /* Probably the BM type needs the MSB of the encoded fractional + * divider also moved like for the chips above. Any infos?
*/ urb_index = (__u16)((urb_index << 8) | priv->interface); } -- cgit v0.10.2 From 07b21fd83606263fe6f327b98774d51e13e502fd Mon Sep 17 00:00:00 2001 From: Andrew Bird Date: Wed, 27 Jul 2011 17:03:17 +0100 Subject: USB: option driver: add PID for Vodafone-Huawei K3770 This patch adds the product ID of Huawei's Vodafone K3770 mobile broadband modem to option.c. This is necessary so that the driver gets loaded on demand without the intervention of usb_modeswitch. This has the benefit of it becoming available faster and also ensures that the option driver is not bound to a network interface that should be claimed by cdc_ether. Signed-off-by: Andrew Bird Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 60b25d8..4dbce2d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -148,6 +148,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC +#define HUAWEI_PRODUCT_K3770 0x14C9 #define HUAWEI_PRODUCT_ETS1220 0x1803 #define HUAWEI_PRODUCT_E353 0x1506 @@ -547,6 +548,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, -- cgit v0.10.2 From e2949080792256d1c979aaf30ecd4cab42829f87 Mon Sep 17 00:00:00 2001 From: Andrew Bird Date: Wed, 27 Jul 2011 17:03:18 +0100 Subject: USB: option driver: add PID for Vodafone-Huawei K3771 This patch adds the product ID of Huawei's Vodafone K3771 mobile broadband modem to option.c. This is necessary so that the driver gets loaded on demand without the intervention of usb_modeswitch. This has the benefit of it becoming available faster and also ensures that the option driver is not bound to a network interface that should be claimed by cdc_ether. 
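For context on why these entries match more than VID/PID: USB_DEVICE_AND_INTERFACE_INFO() also keys on the interface class/subclass/protocol triple, so option binds only the vendor-specific serial interfaces while cdc_ether is free to claim the network interface. What one K3770 entry asks the USB core to check boils down to something like this (an illustrative reduction, not the core's actual matching code; 0x12d1 is HUAWEI_VENDOR_ID):

static bool matches_k3770_serial_if(u16 vid, u16 pid,
				    const struct usb_interface_descriptor *d)
{
	return vid == 0x12d1 && pid == 0x14c9 &&  /* HUAWEI / K3770 */
	       d->bInterfaceClass == 0xff &&      /* vendor specific */
	       d->bInterfaceSubClass == 0x02 &&
	       (d->bInterfaceProtocol == 0x31 ||  /* the two serial */
		d->bInterfaceProtocol == 0x32);   /* function ports */
}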
Signed-off-by: Andrew Bird Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 4dbce2d..b9783b6 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -149,6 +149,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC #define HUAWEI_PRODUCT_K3770 0x14C9 +#define HUAWEI_PRODUCT_K3771 0x14CA #define HUAWEI_PRODUCT_ETS1220 0x1803 #define HUAWEI_PRODUCT_E353 0x1506 @@ -550,6 +551,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, -- cgit v0.10.2 From 0930bb46bbbb43afe3381ece2cb2f8a5bc3fb544 Mon Sep 17 00:00:00 2001 From: Andrew Bird Date: Wed, 27 Jul 2011 17:03:19 +0100 Subject: USB: option driver: add PID for Vodafone-Huawei K4510 This patch adds the product ID of Huawei's Vodafone K4510 mobile broadband modem to option.c. This is necessary so that the driver gets loaded on demand without the intervention of usb_modeswitch. This has the benefit of it becoming available faster and also ensures that the option driver is not bound to a network interface that should be claimed by cdc_ether. Signed-off-by: Andrew Bird Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index b9783b6..ceb9336 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -150,6 +150,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_E14AC 0x14AC #define HUAWEI_PRODUCT_K3770 0x14C9 #define HUAWEI_PRODUCT_K3771 0x14CA +#define HUAWEI_PRODUCT_K4510 0x14CB #define HUAWEI_PRODUCT_ETS1220 0x1803 #define HUAWEI_PRODUCT_E353 0x1506 @@ -553,6 +554,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, -- cgit v0.10.2 From 35e9e21fb30dc4452b33aed5cbf233743bffca40 Mon Sep 17 00:00:00 2001 From: Andrew Bird Date: Wed, 27 Jul 2011 17:03:20 +0100 Subject: USB: option driver: add PID for Vodafone-Huawei K4511 This patch adds the product ID of Huawei's Vodafone K4511 mobile broadband modem to option.c. 
This is necessary so that the driver gets loaded on demand without the intervention of usb_modeswitch. This has the benefit of it becoming available faster and also ensures that the option driver is not bound to a network interface that should be claimed by cdc_ether. Signed-off-by: Andrew Bird Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ceb9336..8156561 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -151,6 +151,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_K3770 0x14C9 #define HUAWEI_PRODUCT_K3771 0x14CA #define HUAWEI_PRODUCT_K4510 0x14CB +#define HUAWEI_PRODUCT_K4511 0x14CC #define HUAWEI_PRODUCT_ETS1220 0x1803 #define HUAWEI_PRODUCT_E353 0x1506 @@ -556,6 +557,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, -- cgit v0.10.2 From de842eff41017721312d2747bcbee89c1beda6d0 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 6 Aug 2011 10:30:45 -0700 Subject: drm/i915: Wait for LVDS panel power sequence During mode setting, check to make sure the panel power sequencing has completed before doing further operations on the device. This uncovered errors with DPMS not turning the device off as it was left locked. 
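wait_for(), used throughout the hunks below, is i915's poll-until-timeout helper; conceptually it expands to something like the following sketch (an assumption about its shape, not the driver's exact macro): evaluate the condition repeatedly, sleep between polls, and return non-zero once the timeout lapses.

#define wait_for_sketch(COND, MS) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;	/* timed out */		\
			break;						\
		}							\
		msleep(1);						\
	}								\
	ret__;								\
})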
Signed-off-by: Keith Packard Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 2e8ddfc..6318828 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -72,14 +72,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 ctl_reg, lvds_reg; + u32 ctl_reg, lvds_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; lvds_reg = PCH_LVDS; + stat_reg = PCH_PP_STATUS; } else { ctl_reg = PP_CONTROL; lvds_reg = LVDS; + stat_reg = PP_STATUS; } I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); @@ -94,17 +96,16 @@ static void intel_lvds_enable(struct intel_lvds *intel_lvds) DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", intel_lvds->pfit_control, intel_lvds->pfit_pgm_ratios); - if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { - DRM_ERROR("timed out waiting for panel to power off\n"); - } else { - I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); - I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); - intel_lvds->pfit_dirty = false; - } + + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); + intel_lvds->pfit_dirty = false; } I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); POSTING_READ(lvds_reg); + if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) + DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(dev); } @@ -113,24 +114,25 @@ static void intel_lvds_disable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 ctl_reg, lvds_reg; + u32 ctl_reg, lvds_reg, stat_reg; if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; lvds_reg = PCH_LVDS; + stat_reg = PCH_PP_STATUS; } else { ctl_reg = PP_CONTROL; lvds_reg = LVDS; + stat_reg = PP_STATUS; } intel_panel_disable_backlight(dev); I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); if (intel_lvds->pfit_control) { - if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) - DRM_ERROR("timed out waiting for panel to power off\n"); - I915_WRITE(PFIT_CONTROL, 0); intel_lvds->pfit_dirty = true; } -- cgit v0.10.2 From ed10fca9c351c83ab89a97f3515089e0d36bdccc Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 6 Aug 2011 10:33:12 -0700 Subject: drm/i915: Leave LVDS registers unlocked There's no reason to relock them; it just makes operations more complex. This fixes DPMS where the panel registers were locked making the disable not work. Signed-off-by: Keith Packard Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 6318828..8b521a2 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -400,53 +400,21 @@ out: static void intel_lvds_prepare(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - /* We try to do the minimum that is necessary in order to unlock - * the registers for mode setting. - * - * On Ironlake, this is quite simple as we just set the unlock key - * and ignore all subtleties. (This may cause some issues...) 
- * + /* * Prior to Ironlake, we must disable the pipe if we want to adjust * the panel fitter. However at all other times we can just reset * the registers regardless. */ - - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_PP_CONTROL, - I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); - } else if (intel_lvds->pfit_dirty) { - I915_WRITE(PP_CONTROL, - (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS) - & ~POWER_TARGET_ON); - } else { - I915_WRITE(PP_CONTROL, - I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); - } + if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty) + intel_lvds_disable(intel_lvds); } static void intel_lvds_commit(struct drm_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - /* Undo any unlocking done in prepare to prevent accidental - * adjustment of the registers. - */ - if (HAS_PCH_SPLIT(dev)) { - u32 val = I915_READ(PCH_PP_CONTROL); - if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) - I915_WRITE(PCH_PP_CONTROL, val & 0x3); - } else { - u32 val = I915_READ(PP_CONTROL); - if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS) - I915_WRITE(PP_CONTROL, val & 0x3); - } - /* Always do a full power on as we do not know what state * we were left in. */ @@ -1042,6 +1010,19 @@ out: pwm = I915_READ(BLC_PWM_PCH_CTL1); pwm |= PWM_PCH_ENABLE; I915_WRITE(BLC_PWM_PCH_CTL1, pwm); + /* + * Unlock registers and just + * leave them unlocked + */ + I915_WRITE(PCH_PP_CONTROL, + I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS); + } else { + /* + * Unlock registers and just + * leave them unlocked + */ + I915_WRITE(PP_CONTROL, + I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); } dev_priv->lid_notifier.notifier_call = intel_lid_notify; if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { -- cgit v0.10.2 From 1519b9956eb4b4180fa3f47c73341463cdcfaa37 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 6 Aug 2011 10:35:34 -0700 Subject: drm/i915: Fix PCH port pipe select in CPT disable paths CPT pipe select is different from previous generations (using two bits instead of one). All of the paths from intel_disable_pch_ports were not making this distinction. Mode setting with pipe A turned off would then also force all outputs on pipe B to get turned off as the disable code would mistakenly decide that all of these outputs were on pipe A and turn them off. This is an extension of the CPT DP disable fix (why didn't I fix this then?) Signed-off-by: Keith Packard Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d1331f7..5baaef4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1318,6 +1318,7 @@ #define ADPA_PIPE_SELECT_MASK (1<<30) #define ADPA_PIPE_A_SELECT 0 #define ADPA_PIPE_B_SELECT (1<<30) +#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30) #define ADPA_USE_VGA_HVPOLARITY (1<<15) #define ADPA_SETS_HVPOLARITY 0 #define ADPA_VSYNC_CNTL_DISABLE (1<<11) @@ -1460,6 +1461,7 @@ /* Selects pipe B for LVDS data. Must be set on pre-965. */ #define LVDS_PIPEB_SELECT (1 << 30) #define LVDS_PIPE_MASK (1 << 30) +#define LVDS_PIPE(pipe) ((pipe) << 30) /* LVDS dithering flag on 965/g4x platform */ #define LVDS_ENABLE_DITHER (1 << 25) /* LVDS sync polarity flags. Set to invert (i.e. 
negative) */ @@ -1499,9 +1501,6 @@ #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) -#define LVDS_PIPE_ENABLED(V, P) \ - (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN)) - /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 #define VIDEO_DIP_CTL 0x61170 @@ -3256,14 +3255,12 @@ #define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17) #define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) -#define ADPA_PIPE_ENABLED(V, P) \ - (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE)) - /* or SDVOB */ #define HDMIB 0xe1140 #define PORT_ENABLE (1 << 31) #define TRANSCODER_A (0) #define TRANSCODER_B (1 << 30) +#define TRANSCODER(pipe) ((pipe) << 30) #define TRANSCODER_MASK (1 << 30) #define COLOR_FORMAT_8bpc (0) #define COLOR_FORMAT_12bpc (3 << 26) @@ -3280,9 +3277,6 @@ #define HSYNC_ACTIVE_HIGH (1 << 3) #define PORT_DETECTED (1 << 2) -#define HDMI_PIPE_ENABLED(V, P) \ - (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE)) - /* PCH SDVOB multiplex with HDMIB */ #define PCH_SDVOB HDMIB @@ -3349,6 +3343,7 @@ #define PORT_TRANS_B_SEL_CPT (1<<29) #define PORT_TRANS_C_SEL_CPT (2<<29) #define PORT_TRANS_SEL_MASK (3<<29) +#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29) #define TRANS_DP_CTL_A 0xe0300 #define TRANS_DP_CTL_B 0xe1300 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 35364e6..4c4c903 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -998,6 +998,53 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, return true; } +static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 val) +{ + if ((val & PORT_ENABLE) == 0) + return false; + + if (HAS_PCH_CPT(dev_priv->dev)) { + if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) + return false; + } else { + if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) + return false; + } + return true; +} + +static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 val) +{ + if ((val & LVDS_PORT_EN) == 0) + return false; + + if (HAS_PCH_CPT(dev_priv->dev)) { + if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) + return false; + } else { + if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) + return false; + } + return true; +} + +static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 val) +{ + if ((val & ADPA_DAC_ENABLE) == 0) + return false; + if (HAS_PCH_CPT(dev_priv->dev)) { + if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) + return false; + } else { + if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) + return false; + } + return true; +} + static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 port_sel) { @@ -1011,7 +1058,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - WARN(HDMI_PIPE_ENABLED(val, pipe), + WARN(hdmi_pipe_enabled(dev_priv, val, pipe), "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } @@ -1028,13 +1075,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - WARN(ADPA_PIPE_ENABLED(val, pipe), + WARN(adpa_pipe_enabled(dev_priv, val, pipe), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); - WARN(LVDS_PIPE_ENABLED(val, pipe), 
+ WARN(lvds_pipe_enabled(dev_priv, val, pipe), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -1370,7 +1417,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (HDMI_PIPE_ENABLED(val, pipe)) { + if (hdmi_pipe_enabled(dev_priv, val, pipe)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); @@ -1392,12 +1439,13 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - if (ADPA_PIPE_ENABLED(val, pipe)) + if (adpa_pipe_enabled(dev_priv, val, pipe)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); - if (LVDS_PIPE_ENABLED(val, pipe)) { + if (lvds_pipe_enabled(dev_priv, val, pipe)) { + DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); udelay(100); -- cgit v0.10.2 From 4e6343898fe7eed6b3c0c3c809347bc88d5b4a1e Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Sat, 6 Aug 2011 10:39:45 -0700 Subject: drm/i915: Remove unused 'reg' argument to dp_pipe_enabled Just an extra parameter which isn't actually needed. Signed-off-by: Keith Packard Reviewed-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4c4c903..f6f18c7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -980,8 +980,8 @@ static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } -static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, enum pipe pipe, - int reg, u32 port_sel, u32 val) +static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 port_sel, u32 val) { if ((val & DP_PORT_EN) == 0) return false; @@ -1049,7 +1049,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 port_sel) { u32 val = I915_READ(reg); - WARN(dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val), + WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); } @@ -1407,7 +1407,7 @@ static void disable_pch_dp(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 port_sel) { u32 val = I915_READ(reg); - if (dp_pipe_enabled(dev_priv, pipe, reg, port_sel, val)) { + if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~DP_PORT_EN); } -- cgit v0.10.2 From a7e6401e19aa54924ab11ee548afaad0a55ffdc6 Mon Sep 17 00:00:00 2001 From: Arnaud Lacombe Date: Thu, 21 Jul 2011 13:16:20 -0400 Subject: usb/host/pci-quirks.c: correct annotation of `ehci_dmi_nohandoff_table' ehci_bios_handoff() is marked __devinit, `ehci_dmi_nohandoff_table' should be marked __devinitconst, not __initconst. This fixes the following section mismatch: WARNING: vmlinux.o(.devinit.text+0x4f08): Section mismatch in reference from the function ehci_bios_handoff() to the variable .init.rodata:ehci_dmi_nohandoff_table The function __devinit ehci_bios_handoff() references a variable __initconst ehci_dmi_nohandoff_table. If ehci_dmi_nohandoff_table is only used by ehci_bios_handoff then annotate ehci_dmi_nohandoff_table with a matching annotation. 
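A minimal sketch of the annotation rule this enforces may help; the driver and table below are hypothetical, not taken from this patch. Data placed in .init.rodata via __initconst is discarded once boot-time initialization finishes, but a __devinit function can still run afterwards (for instance at probe time on hotplug when CONFIG_HOTPLUG=y), so any table such a function references must carry the longer-lived __devinitconst annotation:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>

/* Hypothetical quirk table: __devinitconst, so it lives at least as
 * long as the __devinit code that references it. */
static const struct dmi_system_id __devinitconst example_quirk_table[] = {
	{
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXAMPLE"),
		},
	},
	{ } /* terminating entry */
};

static void __devinit example_apply_quirks(void)
{
	/* May run at device-probe time, after .init sections were freed. */
	if (dmi_check_system(example_quirk_table))
		pr_info("example: applying board quirk\n");
}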
Cc: Sarah Sharp Signed-off-by: Arnaud Lacombe Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index f4d1b69..629a968 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -535,7 +535,7 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) iounmap(base); } -static const struct dmi_system_id __initconst ehci_dmi_nohandoff_table[] = { +static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = { { /* Pegatron Lucid (ExoPC) */ .matches = { -- cgit v0.10.2 From e468561739fffb972d486b98f66c723936335136 Mon Sep 17 00:00:00 2001 From: Vijay Chavan Date: Tue, 9 Aug 2011 02:41:12 +0530 Subject: USB: Serial: Added device ID for Qualcomm Modem in Sagemcom's HiLo3G A new device ID pair is added for Qualcomm Modem present in Sagemcom's HiLo3G module. Signed-off-by: Vijay Chavan Cc: stable Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 54a9dab..27f9ae4 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -45,6 +45,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ + {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ -- cgit v0.10.2 From a96edd59b2bc88b3d1ea47e0ba48076d65db9302 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Thu, 4 Aug 2011 16:44:42 -0600 Subject: ASoC: Tegra: tegra_pcm_deallocate_dma_buffer: Don't OOPS Not all PCM devices have all sub-streams. Specifically, the SPDIF driver only supports playback and hence has no capture substream. Check whether a substream exists before dereferencing it, when de-allocating DMA buffers in tegra_pcm_deallocate_dma_buffer. Signed-off-by: Stephen Warren Acked-by: Liam Girdwood Signed-off-by: Mark Brown Cc: stable@kernel.org diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c index ff86e5e..c7cfd96 100644 --- a/sound/soc/tegra/tegra_pcm.c +++ b/sound/soc/tegra/tegra_pcm.c @@ -309,9 +309,14 @@ static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) static void tegra_pcm_deallocate_dma_buffer(struct snd_pcm *pcm, int stream) { - struct snd_pcm_substream *substream = pcm->streams[stream].substream; - struct snd_dma_buffer *buf = &substream->dma_buffer; + struct snd_pcm_substream *substream; + struct snd_dma_buffer *buf; + + substream = pcm->streams[stream].substream; + if (!substream) + return; + buf = &substream->dma_buffer; if (!buf->area) return; -- cgit v0.10.2 From 29591ed4ac6fe00e3ff23b5be0cdc7016ef9c47e Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Thu, 4 Aug 2011 16:44:43 -0600 Subject: ASoC: Tegra: wm8903 machine driver: Allow re-insertion of module Two issues were preventing module snd-soc-tegra-wm8903.ko from being removed and re-inserted: a) The speaker-enable GPIO is hosted by the WM8903 chip. This GPIO must be freed before snd_soc_unregister_card() is called, because that triggers wm8903.c:wm8903_remove(), which calls gpiochip_remove(), which then fails if any of the GPIOs are in use. To solve this, free all GPIOs first, so the code doesn't care where they come from. 
b) We need to call snd_soc_jack_free_gpios() to match the call to snd_soc_jack_add_gpios() during initialization. Without this, the call to snd_soc_jack_add_gpios() fails during any subsequent modprobe and initialization, since the GPIO and IRQ are already registered. In turn, this causes the headphone state not to be monitored, so the headphone is assumed not to be plugged in, and the audio path to it is never enabled. Signed-off-by: Stephen Warren Cc: stable@kernel.org Signed-off-by: Mark Brown diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c index a42e9ac..661373c 100644 --- a/sound/soc/tegra/tegra_wm8903.c +++ b/sound/soc/tegra/tegra_wm8903.c @@ -56,6 +56,7 @@ #define GPIO_HP_MUTE BIT(1) #define GPIO_INT_MIC_EN BIT(2) #define GPIO_EXT_MIC_EN BIT(3) +#define GPIO_HP_DET BIT(4) struct tegra_wm8903 { struct tegra_asoc_utils_data util_data; @@ -304,6 +305,7 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd) snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack, 1, &tegra_wm8903_hp_jack_gpio); + machine->gpio_requested |= GPIO_HP_DET; } snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE, @@ -429,10 +431,10 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev) struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card); struct tegra_wm8903_platform_data *pdata = machine->pdata; - snd_soc_unregister_card(card); - - tegra_asoc_utils_fini(&machine->util_data); - + if (machine->gpio_requested & GPIO_HP_DET) + snd_soc_jack_free_gpios(&tegra_wm8903_hp_jack, + 1, + &tegra_wm8903_hp_jack_gpio); if (machine->gpio_requested & GPIO_EXT_MIC_EN) gpio_free(pdata->gpio_ext_mic_en); if (machine->gpio_requested & GPIO_INT_MIC_EN) @@ -441,6 +443,11 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev) gpio_free(pdata->gpio_hp_mute); if (machine->gpio_requested & GPIO_SPKR_EN) gpio_free(pdata->gpio_spkr_en); + machine->gpio_requested = 0; + + snd_soc_unregister_card(card); + + tegra_asoc_utils_fini(&machine->util_data); kfree(machine); -- cgit v0.10.2 From f99847a6909b95f857ee502ec98c372dcfd90b12 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Thu, 4 Aug 2011 16:44:44 -0600 Subject: ASoC: WM8903: Free IRQ on device removal Without this, request_irq on subsequent device initialization fails, and the codec cannot be used. Signed-off-by: Stephen Warren Acked-by: Liam Girdwood Signed-off-by: Mark Brown diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 43e3d76..4ad8ebd 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -2046,8 +2046,13 @@ static int wm8903_probe(struct snd_soc_codec *codec) /* power down chip */ static int wm8903_remove(struct snd_soc_codec *codec) { + struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec); + wm8903_free_gpio(codec); wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF); + if (wm8903->irq) + free_irq(wm8903->irq, codec); + return 0; } -- cgit v0.10.2 From cd566c64f50e568c0ac3c13bdd15f523631ce845 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Mon, 8 Aug 2011 23:39:59 -0700 Subject: Input: mma8450 - fix module device table type The module device table for of_device_id should use "of" type. 
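As a rough illustration of why the type argument matters (hypothetical device below; the behaviour described is the usual modpost one): MODULE_DEVICE_TABLE(type, name) exports the array so that modpost can generate module aliases, interpreting the array according to the named bus type. Declaring an of_device_id array with the "i2c" type makes modpost parse it with the wrong structure layout, so the emitted aliases are garbage and the module is never auto-loaded for its compatible string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-dev" }, /* hypothetical compatible */
	{ /* sentinel */ }
};
/* The first argument must name the table's actual type: "of", not "i2c". */
MODULE_DEVICE_TABLE(of, example_dt_ids);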
Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 6c76cf7..0794778 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c @@ -234,7 +234,7 @@ static const struct of_device_id mma8450_dt_ids[] = { { .compatible = "fsl,mma8450", }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(i2c, mma8450_dt_ids); +MODULE_DEVICE_TABLE(of, mma8450_dt_ids); static struct i2c_driver mma8450_driver = { .driver = { -- cgit v0.10.2 From db0b34b07438d92c4c190998c42a502fbf90064e Mon Sep 17 00:00:00 2001 From: "Joshua V. Dillon" Date: Mon, 8 Aug 2011 23:45:14 -0700 Subject: Input: bcm5974 - add support for touchpads found in MacBookAir4,2 Added USB device IDs for MacBookAir4,2 trackpad. Device constants were copied from the MacBookAir3,2 constants. The 4,2 device specification is reportedly unchanged from the 3,2 predecessor and seems to work well. Signed-off-by: Joshua V Dillon Signed-off-by: Chase Douglas Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 3126983..48d9ec1 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -67,6 +67,10 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI 0x0245 #define USB_DEVICE_ID_APPLE_WELLSPRING5_ISO 0x0246 #define USB_DEVICE_ID_APPLE_WELLSPRING5_JIS 0x0247 +/* MacbookAir4,2 (unibody, July 2011) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c +#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d +#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -104,6 +108,10 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5_JIS), + /* MacbookAir4,2 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), /* Terminating entry */ {} }; @@ -294,6 +302,18 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING6_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + }, {} }; -- cgit v0.10.2 From 4d66164e997ea791c5a4cefe6fc2e1fbb3ffb9c8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 6 Aug 2011 09:34:26 +0100 Subject: dmaengine: PL08x: Fix trivial build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Something changed during the 3.1 merge window in the include files which now causes the pl08x DMA engine driver to fail to build. 
Fix this by adding the now necessary dma-mapping.h include: drivers/dma/amba-pl08x.c: In function ‘pl08x_unmap_buffers’: drivers/dma/amba-pl08x.c:1524: error: implicit declaration of function ‘dma_unmap_single’ drivers/dma/amba-pl08x.c:1527: error: implicit declaration of function ‘dma_unmap_page’ Acked-by: Vinod Koul Acked-by: Linus Walleij Signed-off-by: Russell King diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 196a737..be21e3f 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -80,6 +80,7 @@ #include #include #include +#include <linux/dma-mapping.h> #include #include #include -- cgit v0.10.2 From 7760d54600a3d6206551c12eb53931ce7369d424 Mon Sep 17 00:00:00 2001 From: "Brian S. Julin" Date: Sun, 24 Jul 2011 16:53:50 +0100 Subject: ARM: 7005/1: freshen up mm/proc-arm946.S The file mm/proc-arm946.S contains a typo and is missing a structure member in __arm946_proc_info. The former prevents compilation and the latter causes problems during boot. It is likely this file was manually copied from a similar file and not tested, then later updates to the *_proc_info structures missed this file. This patch will apply (with offset) with or without the recent macro unification work that has been done in this directory. This was verified against linux-next/stable last week. See arm-linux-kernel thread: http://lists.arm.linux.org.uk/lurker/message/20110718.103237.0106d468.en.html Signed-off-by: Brian S. Julin Signed-off-by: Russell King diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index f8f7ea3..683af3a 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -410,6 +410,7 @@ __arm946_proc_info: .long 0x41009460 .long 0xff00fff0 .long 0 + .long 0 b __arm946_setup .long cpu_arch_name .long cpu_elf_name @@ -418,6 +419,6 @@ __arm946_proc_info: .long arm946_processor_functions .long 0 .long 0 - .long arm940_cache_fns + .long arm946_cache_fns .size __arm946_proc_info, . - __arm946_proc_info -- cgit v0.10.2 From bf912d99e94cd1f43a7decce2e9b79a3ca7f2418 Mon Sep 17 00:00:00 2001 From: Jamie Iles Date: Thu, 4 Aug 2011 09:39:31 +0100 Subject: ARM: 7010/1: mm: fix invalid loop for poison_init_mem poison_init_mem() used a loop of: while ((count = count - 4)) which has two problems: an off-by-one error, so that we poison one less word than we should, and, if count == 0, we loop forever and poison too much. On a platform with HAVE_TCM=y but nothing in the TCMs, this caused corruption and the platform failed to boot. Acked-by: Stephen Boyd Acked-by: Nicolas Pitre Signed-off-by: Jamie Iles Signed-off-by: Russell King diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 2fee782..91bca35 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -441,7 +441,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) static inline void poison_init_mem(void *s, size_t count) { u32 *p = (u32 *)s; - while ((count = count - 4)) + for (; count != 0; count -= 4) *p++ = 0xe7fddef0; } -- cgit v0.10.2 From 088c01f1e39dbe93a13e0b00f4532ed8b79d35f4 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 28 Jul 2011 14:28:52 +0100 Subject: ARM: 7007/1: alignment: Prevent ignoring of faults with ARMv6 unaligned access model Currently, it's possible to set the kernel to ignore alignment faults when changing the alignment fault handling mode at runtime via /proc/cpu/alignment, even though this is undesirable on ARMv6 and above, where it can result in infinite spins where an un-fixed-up instruction repeatedly faults.
In addition, the kernel clobbers any alignment mode specified on the command-line if running on ARMv6 or above. This patch factors out the necessary safety check into a couple of new helper functions, and checks and modifies the fault handling mode as appropriate on boot and on writes to /proc/cpu/alignment. Prior to ARMv6, the behaviour is unchanged. For ARMv6 and above, the behaviour changes as follows: * Attempting to ignore faults on ARMv6 results in the mode being forced to UM_FIXUP instead. A warning is printed if this happened as a result of a write to /proc/cpu/alignment. The user's UM_WARN bit (if present) is still honoured. * An alignment= argument from the kernel command-line is now honoured, except that the kernel will modify the specified mode as described above. This allows modes such as UM_SIGNAL and UM_WARN to be active immediately from boot, which is useful for debugging purposes. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index be7c638..1df38e8 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -95,6 +95,33 @@ static const char *usermode_action[] = { "signal+warn" }; +/* Return true if and only if the ARMv6 unaligned access model is in use. */ +static bool cpu_is_v6_unaligned(void) +{ + return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U); +} + +static int safe_usermode(int new_usermode, bool warn) +{ + /* + * ARMv6 and later CPUs can perform unaligned accesses for + * most single load and store instructions up to word size. + * LDM, STM, LDRD and STRD still need to be handled. + * + * Ignoring the alignment fault is not an option on these + * CPUs since we spin re-faulting the instruction without + * making any progress. + */ + if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) { + new_usermode |= UM_FIXUP; + + if (warn) + printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n"); + } + + return new_usermode; +} + static int alignment_proc_show(struct seq_file *m, void *v) { seq_printf(m, "User:\t\t%lu\n", ai_user); @@ -125,7 +152,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer if (get_user(mode, buffer)) return -EFAULT; if (mode >= '0' && mode <= '5') - ai_usermode = mode - '0'; + ai_usermode = safe_usermode(mode - '0', true); } return count; } @@ -926,20 +953,11 @@ static int __init alignment_init(void) return -ENOMEM; #endif - /* - * ARMv6 and later CPUs can perform unaligned accesses for - * most single load and store instructions up to word size. - * LDM, STM, LDRD and STRD still need to be handled. - * - * Ignoring the alignment fault is not an option on these - * CPUs since we spin re-faulting the instruction without - * making any progress. - */ - if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) { + if (cpu_is_v6_unaligned()) { cr_alignment &= ~CR_A; cr_no_alignment &= ~CR_A; set_cr(cr_alignment); - ai_usermode = UM_FIXUP; + ai_usermode = safe_usermode(ai_usermode, false); } hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, -- cgit v0.10.2 From 2102a65e69eac8d77dd71b4991b395e825087ba8 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 28 Jul 2011 14:29:40 +0100 Subject: ARM: 7008/1: alignment: Make SIGBUS sent to userspace POSIXly correct With the UM_SIGNAL alignment fault mode, no siginfo structure is passed to userspace.
POSIX specifies how siginfo_t should be populated for alignment faults, so this patch does just that: * si_signo = SIGBUS * si_code = BUS_ADRALN * si_addr = misaligned data address at which access was attempted Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Acked-by: Kirill A. Shutemov Reviewed-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 1df38e8..cfbcf8b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -22,6 +22,7 @@ #include #include +#include #include #include "fault.h" @@ -913,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (ai_usermode & UM_FIXUP) goto fixup; - if (ai_usermode & UM_SIGNAL) - force_sig(SIGBUS, current); - else { + if (ai_usermode & UM_SIGNAL) { + siginfo_t si; + + si.si_signo = SIGBUS; + si.si_errno = 0; + si.si_code = BUS_ADRALN; + si.si_addr = (void __user *)addr; + + force_sig_info(si.si_signo, &si, current); + } else { /* * We're about to disable the alignment trap and return to * user space. But if an interrupt occurs before actually -- cgit v0.10.2 From 80e0401e35410a69bfae05b454db8a7187edd6b8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 5 Aug 2011 14:26:17 +0200 Subject: lockdep: Fix wrong assumption in match_held_lock match_held_lock() was assuming it was being called on a lock class that had already seen usage. This condition was true for bug-free code using lockdep_assert_held(), since you're in fact holding the lock when calling it. However the assumption fails the moment you assume the assertion can fail, which is the whole point of having the assertion in the first place. Anyway, now that there are more lockdep_is_held() users, notably __rcu_dereference_check(), it's much easier to trigger this since we test for a number of locks and we only need to hold any one of them to be good. Reported-by: Sergey Senozhatsky Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1312547787.28695.2.camel@twins Signed-off-by: Ingo Molnar diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 8c24294..91d67ce 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -3111,7 +3111,13 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) if (!class) class = look_up_lock_class(lock, 0); - if (DEBUG_LOCKS_WARN_ON(!class)) + /* + * If look_up_lock_class() failed to find a class, we're trying + * to test if we hold a lock that has never yet been acquired. + * Clearly if the lock hasn't been acquired _ever_, we're not + * holding it either, so report failure. + */ + if (!class) return 0; if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) -- cgit v0.10.2 From a34668f6beb4ab01e07683276d6a24bab6c175e0 Mon Sep 17 00:00:00 2001 From: Youquan Song Date: Tue, 2 Aug 2011 14:01:35 +0800 Subject: perf, x86: Add model 45 SandyBridge support Add support for Romely-EP SandyBridge.
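As an aside on what model 45 means in practice, here is a small userspace sketch (an editor's illustration based on the usual CPUID conventions, not part of the patch): for family 6 processors the model number combines the basic and extended model fields of CPUID leaf 1, and Romely-EP parts report 45 (0x2d):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model;

	__get_cpuid(1, &eax, &ebx, &ecx, &edx);
	family = (eax >> 8) & 0xf;
	model = (eax >> 4) & 0xf;
	if (family == 6 || family == 15)
		model |= ((eax >> 16) & 0xf) << 4; /* extended model bits */
	printf("family %u model %u\n", family, model); /* 45 == 0x2d */
	return 0;
}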
Signed-off-by: Youquan Song Signed-off-by: Anhua Xu Signed-off-by: Lin Ming Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1312264895-2010-1-git-send-email-youquan.song@intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 45fbb8f..f88af2c 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1590,6 +1590,7 @@ static __init int intel_pmu_init(void) break; case 42: /* SandyBridge */ + case 45: /* SandyBridge, "Romely-EP" */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); -- cgit v0.10.2 From 6fbabb20faed9c08f8b96de4182bd721cbd1cfcf Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Mon, 8 Aug 2011 11:16:56 -0500 Subject: slub: Fix full list corruption if debugging is on When a slab that can only ever contain a single object is freed by __slab_free(), it was full (and therefore not on the partial lists, but on the full list in the debug case) before we reached slab_empty. This caused the following full list corruption when SLUB debugging was enabled: [ 5913.233035] ------------[ cut here ]------------ [ 5913.233097] WARNING: at lib/list_debug.c:53 __list_del_entry+0x8d/0x98() [ 5913.233101] Hardware name: Adamo 13 [ 5913.233105] list_del corruption. prev->next should be ffffea000434fd20, but was ffffea0004199520 [ 5913.233108] Modules linked in: nfs fscache fuse ebtable_nat ebtables ppdev parport_pc lp parport ipt_MASQUERADE iptable_nat nf_nat nfsd lockd nfs_acl auth_rpcgss xt_CHECKSUM sunrpc iptable_mangle bridge stp llc cpufreq_ondemand acpi_cpufreq freq_table mperf ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables rfcomm bnep arc4 iwlagn snd_hda_codec_hdmi snd_hda_codec_idt snd_hda_intel btusb mac80211 snd_hda_codec bluetooth snd_hwdep snd_seq snd_seq_device snd_pcm usb_debug dell_wmi sparse_keymap cdc_ether usbnet cdc_acm uvcvideo cdc_wdm mii cfg80211 snd_timer dell_laptop videodev dcdbas snd microcode v4l2_compat_ioctl32 soundcore joydev tg3 pcspkr snd_page_alloc iTCO_wdt i2c_i801 rfkill iTCO_vendor_support wmi virtio_net kvm_intel kvm ipv6 xts gf128mul dm_crypt i915 drm_kms_helper drm i2c_algo_bit i2c_core video [last unloaded: scsi_wait_scan] [ 5913.233213] Pid: 0, comm: swapper Not tainted 3.0.0+ #127 [ 5913.233213] Call Trace: [ 5913.233213] [] warn_slowpath_common+0x83/0x9b [ 5913.233213] [] warn_slowpath_fmt+0x46/0x48 [ 5913.233213] [] __list_del_entry+0x8d/0x98 [ 5913.233213] [] list_del+0xe/0x2d [ 5913.233213] [] __slab_free+0x1db/0x235 [ 5913.233213] [] ? bvec_free_bs+0x35/0x37 [ 5913.233213] [] ? bvec_free_bs+0x35/0x37 [ 5913.233213] [] ? bvec_free_bs+0x35/0x37 [ 5913.233213] [] kmem_cache_free+0x88/0x102 [ 5913.233213] [] bvec_free_bs+0x35/0x37 [ 5913.233213] [] bio_free+0x34/0x64 [ 5913.233213] [] dm_bio_destructor+0x12/0x14 [ 5913.233213] [] bio_put+0x2b/0x2d [ 5913.233213] [] clone_endio+0x9e/0xb4 [ 5913.233213] [] bio_endio+0x2d/0x2f [ 5913.233213] [] crypt_dec_pending+0x5c/0x8b [dm_crypt] [ 5913.233213] [] crypt_endio+0x78/0x81 [dm_crypt] [ Full discussion here: https://lkml.org/lkml/2011/8/4/375 ] Make sure that we also remove such a slab from the full lists.
Reported-and-tested-by: Dave Jones Reported-and-tested-by: Xiaotian Feng Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg diff --git a/mm/slub.c b/mm/slub.c index eb5a8f9..5436fe2 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2387,11 +2387,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page, slab_empty: if (prior) { /* - * Slab still on the partial list. + * Slab on the partial list. */ remove_partial(n, page); stat(s, FREE_REMOVE_PARTIAL); - } + } else + /* Slab must be on the full list */ + remove_full(s, page); spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); -- cgit v0.10.2 From ef62fb32b7b21731e41aea3c1e08bcdb407c9eb9 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 7 Aug 2011 18:30:38 +0900 Subject: slub: fix check_bytes() for slub debugging The check_bytes() function is used by slub debugging. It returns a pointer to the first byte in the given memory area that does not match the given character. If the character to match is greater than 0x80, check_bytes() doesn't work, because the 64-bit pattern is generated as below. value64 = value | value << 8 | value << 16 | value << 24; value64 = value64 | value64 << 32; The integer promotions are performed on value (a u8), and the 32-bit result is sign-extended when it is assigned to the 64-bit value64. The upper 32 bits of value64 are 0xffffffff after the first line, and the second line has no effect. This fixes the 64-bit pattern generation. Signed-off-by: Akinobu Mita Cc: Christoph Lameter Cc: Matt Mackall Reviewed-by: Marcin Slusarz Acked-by: Eric Dumazet Signed-off-by: Pekka Enberg diff --git a/mm/slub.c b/mm/slub.c index 5436fe2..6da6859 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -701,7 +701,7 @@ static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes) return check_bytes8(start, value, bytes); value64 = value | value << 8 | value << 16 | value << 24; - value64 = value64 | value64 << 32; + value64 = (value64 & 0xffffffff) | value64 << 32; prefix = 8 - ((unsigned long)start) % 8; if (prefix) { -- cgit v0.10.2 From ea5e116162b7e0cf83a2b8a273440514404604de Mon Sep 17 00:00:00 2001 From: Konrad Rzeszutek Wilk Date: Wed, 3 Aug 2011 11:12:17 -0400 Subject: xen/blkback: Make description more obvious. With the frontend entry having 'Xen' in its name but the backend entry not, it just looks odd: <*> Xen virtual block device support <*> Block-device backend driver Fix it to have the 'Xen' in front of it. Reported-by: Sander Eikelenboom Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 717d6e4..a89ebf1 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -471,7 +471,7 @@ config XEN_BLKDEV_FRONTEND in another domain which drives the actual block device. config XEN_BLKDEV_BACKEND - tristate "Block-device backend driver" + tristate "Xen block-device backend driver" depends on XEN_BACKEND help The block-device backend driver allows the kernel to export its -- cgit v0.10.2 From 6678050442e90a4e9511a9ed14b9bdfc5e393323 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 3 Aug 2011 17:36:48 +0900 Subject: ASoC: Fix binding of WM8750 on Jive The I2C address is misformatted and would never match.
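For context, a sketch of where the correct name comes from; the exact call shown is an assumption about the i2c core of this era, though the format matches the names seen in sysfs. Client devices are named with the decimal adapter number and the client address as four zero-padded lower-case hex digits, and the ASoC core derives the codec name from that device name, so adapter 0 at address 0x1a yields "wm8750-codec.0-001a", and the "0-0x1a" spelling can never match:

/* Roughly how the i2c core names a client device (assumed shape): */
dev_set_name(&client->dev, "%d-%04x",
	     i2c_adapter_id(client->adapter), client->addr);
/* -> "0-001a" for adapter 0, address 0x1a */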
Signed-off-by: Mark Brown Acked-by: Liam Girdwood Cc: stable@kernel.org diff --git a/sound/soc/samsung/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c index 3b53ad5..14eb6ea 100644 --- a/sound/soc/samsung/jive_wm8750.c +++ b/sound/soc/samsung/jive_wm8750.c @@ -131,7 +131,7 @@ static struct snd_soc_dai_link jive_dai = { .cpu_dai_name = "s3c2412-i2s", .codec_dai_name = "wm8750-hifi", .platform_name = "samsung-audio", - .codec_name = "wm8750-codec.0-0x1a", + .codec_name = "wm8750-codec.0-001a", .init = jive_wm8750_init, .ops = &jive_ops, }; -- cgit v0.10.2 From 40045a85df0ec4406fe611967ea9cf9fa668f493 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 3 Aug 2011 18:32:09 +0900 Subject: ASoC: Fix SPI driver binding for WM8987 As we had no id_table, only the driver name would be matched against, meaning that WM8987 devices wouldn't be bound. Signed-off-by: Mark Brown Acked-by: Liam Girdwood diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c index 38f38fd..65fe78a 100644 --- a/sound/soc/codecs/wm8750.c +++ b/sound/soc/codecs/wm8750.c @@ -778,11 +778,18 @@ static int __devexit wm8750_spi_remove(struct spi_device *spi) return 0; } +static const struct spi_device_id wm8750_spi_ids[] = { + { "wm8750", 0 }, + { "wm8987", 0 }, +}; +MODULE_DEVICE_TABLE(spi, wm8750_spi_id); + static struct spi_driver wm8750_spi_driver = { .driver = { .name = "wm8750-codec", .owner = THIS_MODULE, }, + .id_table = wm8750_spi_ids, .probe = wm8750_spi_probe, .remove = __devexit_p(wm8750_spi_remove), }; -- cgit v0.10.2 From 371e7305c6c348d9e14a98fe337fadbd4106cfef Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 4 Aug 2011 10:54:17 +0900 Subject: ASoC: Fix warning in Speyside WM8962 Signed-off-by: Mark Brown Acked-by: Liam Girdwood diff --git a/sound/soc/samsung/speyside_wm8962.c b/sound/soc/samsung/speyside_wm8962.c index 8ac42bf..0b9eb5f 100644 --- a/sound/soc/samsung/speyside_wm8962.c +++ b/sound/soc/samsung/speyside_wm8962.c @@ -37,7 +37,7 @@ static int speyside_wm8962_set_bias_level(struct snd_soc_card *card, 44100 * 256, SND_SOC_CLOCK_IN); if (ret < 0) { - pr_err("Failed to set SYSCLK: %d\n"); + pr_err("Failed to set SYSCLK: %d\n", ret); return ret; } } -- cgit v0.10.2 From ab04fc5890381e7dc907e809d27bf0f683de6e82 Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 9 Aug 2011 14:30:37 +0100 Subject: gma500: Fix clashes with DRM updates The private object support has migrated from gma500 into the DRM core; remove our now-clashing copy. Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/staging/gma500/gem_glue.c index 779ac1a..daac121 100644 --- a/drivers/staging/gma500/gem_glue.c +++ b/drivers/staging/gma500/gem_glue.c @@ -20,26 +20,6 @@ #include #include -/** - * Initialize an already allocated GEM object of the specified size with - * no GEM provided backing store. Instead the caller is responsible for - * backing the object and handling it.
- */ -int drm_gem_private_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size) -{ - BUG_ON((size & (PAGE_SIZE - 1)) != 0); - - obj->dev = dev; - obj->filp = NULL; - - kref_init(&obj->refcount); - atomic_set(&obj->handle_count, 0); - obj->size = size; - - return 0; -} - void drm_gem_object_release_wrap(struct drm_gem_object *obj) { /* Remove the list map if one is present */ @@ -51,8 +31,7 @@ void drm_gem_object_release_wrap(struct drm_gem_object *obj) kfree(list->map); list->map = NULL; } - if (obj->filp) - drm_gem_object_release(obj); + drm_gem_object_release(obj); } /** diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/staging/gma500/gem_glue.h index a0f2bc4..ce5ce30 100644 --- a/drivers/staging/gma500/gem_glue.h +++ b/drivers/staging/gma500/gem_glue.h @@ -1,4 +1,2 @@ extern void drm_gem_object_release_wrap(struct drm_gem_object *obj); -extern int drm_gem_private_object_init(struct drm_device *dev, - struct drm_gem_object *obj, size_t size); extern int gem_create_mmap_offset(struct drm_gem_object *obj); -- cgit v0.10.2 From 069e3725dd9be3b759a98e8c80ac5fc38b392b23 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 9 Aug 2011 12:42:13 -0300 Subject: perf tools: Check $HOME/.perfconfig ownership Just like we do already for perf.data files. Requested-by: Ingo Molnar Cc: Ben Hutchings Cc: Christian Ohm Cc: David Ahern Cc: Frederic Weisbecker Cc: Jonathan Nieder Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-qgokmxsmvppwpc5404qhyk7e@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 6c86eca..fe02903 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -413,13 +413,32 @@ int perf_config(config_fn_t fn, void *data) home = getenv("HOME"); if (perf_config_global() && home) { char *user_config = strdup(mkpath("%s/.perfconfig", home)); - if (!access(user_config, R_OK)) { - ret += perf_config_from_file(fn, user_config, data); - found += 1; + struct stat st; + + if (user_config == NULL) { + warning("Not enough memory to process %s/.perfconfig, " + "ignoring it.", home); + goto out; + } + + if (stat(user_config, &st) < 0) + goto out_free; + + if (st.st_uid && (st.st_uid != geteuid())) { + warning("File %s not owned by current user or root, " + "ignoring it.", user_config); + goto out_free; } + + if (!st.st_size) + goto out_free; + + ret += perf_config_from_file(fn, user_config, data); + found += 1; +out_free: free(user_config); } - +out: if (found == 0) return -1; return ret; -- cgit v0.10.2 From 4c09bafae37d870ab8efc50faeeb4855cb55b5b7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 8 Aug 2011 23:03:34 +0200 Subject: perf sched: Do not delete session object prematurely The session object is released prematurely when processing events for the latency command. The session's thread objects are used within the output_lat_thread function. Running the following commands: # perf sched record # perf sched latency the latter displays incorrect data and might cause an access violation.
Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1312837414-3819-1-git-send-email-jolsa@redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index dcfe887..3d5702c 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1637,23 +1637,29 @@ static struct perf_event_ops event_ops = { .ordered_samples = true, }; -static int read_events(void) +static void read_events(bool destroy, struct perf_session **psession) { int err = -EINVAL; struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0, false, &event_ops); if (session == NULL) - return -ENOMEM; + die("No Memory"); if (perf_session__has_traces(session, "record -R")) { err = perf_session__process_events(session, &event_ops); + if (err) + die("Failed to process events, error %d", err); + nr_events = session->hists.stats.nr_events[0]; nr_lost_events = session->hists.stats.total_lost; nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; } - perf_session__delete(session); - return err; + if (destroy) + perf_session__delete(session); + + if (psession) + *psession = session; } static void print_bad_events(void) @@ -1689,9 +1695,10 @@ static void print_bad_events(void) static void __cmd_lat(void) { struct rb_node *next; + struct perf_session *session; setup_pager(); - read_events(); + read_events(false, &session); sort_lat(); printf("\n ---------------------------------------------------------------------------------------------------------------\n"); @@ -1717,6 +1724,7 @@ static void __cmd_lat(void) print_bad_events(); printf("\n"); + perf_session__delete(session); } static struct trace_sched_handler map_ops = { @@ -1731,7 +1739,7 @@ static void __cmd_map(void) max_cpu = sysconf(_SC_NPROCESSORS_CONF); setup_pager(); - read_events(); + read_events(true, NULL); print_bad_events(); } @@ -1744,7 +1752,7 @@ static void __cmd_replay(void) test_calibrations(); - read_events(); + read_events(true, NULL); printf("nr_run_events: %ld\n", nr_run_events); printf("nr_sleep_events: %ld\n", nr_sleep_events); -- cgit v0.10.2 From 580cabed88ebc631e740b16010f2fa6ba882652f Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 9 Aug 2011 14:46:51 +0200 Subject: perf sched: Usage leftover from trace -> script rename The 'perf sched' command usage was still showing the 'trace' command instead of the 'script' command. Cc: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110809124651.GD2056@jolsa.brq.redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c index 3d5702c..5177964 100644 --- a/tools/perf/builtin-sched.c +++ b/tools/perf/builtin-sched.c @@ -1777,7 +1777,7 @@ static void __cmd_replay(void) static const char * const sched_usage[] = { - "perf sched [] {record|latency|map|replay|trace}", + "perf sched [] {record|latency|map|replay|script}", NULL }; -- cgit v0.10.2 From da64c6fc4aba6f02aa800db72411f459a9f86809 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Tue, 9 Aug 2011 09:17:46 -0700 Subject: drm/i915: show interrupt info on IVB IVB uses the same interrupt reg layout as SNB, so add an IS_GEN7 to the interrupt debugfs file.
Signed-off-by: Jesse Barnes Signed-off-by: Keith Packard diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a8ab626..3c395a5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); for (i = 0; i < I915_NUM_RINGS; i++) { - if (IS_GEN6(dev)) { + if (IS_GEN6(dev) || IS_GEN7(dev)) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", dev_priv->ring[i].name, I915_READ_IMR(&dev_priv->ring[i])); -- cgit v0.10.2 From 13d83a672e9bbd52ae82c2f611dfd845a957e8b4 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Wed, 3 Aug 2011 12:59:20 -0700 Subject: drm/i915: split out PCH refclk update code We ought to be calling this from our DPMS routines as well as global state may change and we need to enable/disable clocks. So split out the code in preparation for further changes. Signed-off-by: Jesse Barnes Signed-off-by: Keith Packard diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f6f18c7..ee1d701 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5097,6 +5097,81 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, return ret; } +static void ironlake_update_pch_refclk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_crtc *crtc; + struct intel_encoder *encoder; + struct intel_encoder *has_edp_encoder = NULL; + u32 temp; + bool has_lvds = false; + + /* We need to take the global config into account */ + list_for_each_entry(crtc, &mode_config->crtc_list, head) { + if (!crtc->enabled) + continue; + + list_for_each_entry(encoder, &mode_config->encoder_list, + base.head) { + if (encoder->base.crtc != crtc) + continue; + + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + has_lvds = true; + case INTEL_OUTPUT_EDP: + has_edp_encoder = encoder; + break; + } + } + } + + /* Ironlake: try to setup display ref clock before DPLL + * enabling. This is only under driver's control after + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. 
+ */ + temp = I915_READ(PCH_DREF_CONTROL); + /* Always enable nonspread source */ + temp &= ~DREF_NONSPREAD_SOURCE_MASK; + temp |= DREF_NONSPREAD_SOURCE_ENABLE; + temp &= ~DREF_SSC_SOURCE_MASK; + temp |= DREF_SSC_SOURCE_ENABLE; + I915_WRITE(PCH_DREF_CONTROL, temp); + + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + + if (has_edp_encoder) { + if (intel_panel_use_ssc(dev_priv)) { + temp |= DREF_SSC1_ENABLE; + I915_WRITE(PCH_DREF_CONTROL, temp); + + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } + temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Enable CPU source on CPU attached eDP */ + if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (intel_panel_use_ssc(dev_priv)) + temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else { + /* Enable SSC on PCH eDP if needed */ + if (intel_panel_use_ssc(dev_priv)) { + DRM_ERROR("enabling SSC on PCH\n"); + temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; + } + } + I915_WRITE(PCH_DREF_CONTROL, temp); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); + } +} + static int ironlake_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -5292,49 +5367,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); - /* Ironlake: try to setup display ref clock before DPLL - * enabling. This is only under driver's control after - * PCH B stepping, previous chipset stepping should be - * ignoring this setting. - */ - temp = I915_READ(PCH_DREF_CONTROL); - /* Always enable nonspread source */ - temp &= ~DREF_NONSPREAD_SOURCE_MASK; - temp |= DREF_NONSPREAD_SOURCE_ENABLE; - temp &= ~DREF_SSC_SOURCE_MASK; - temp |= DREF_SSC_SOURCE_ENABLE; - I915_WRITE(PCH_DREF_CONTROL, temp); - - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); - - if (has_edp_encoder) { - if (intel_panel_use_ssc(dev_priv)) { - temp |= DREF_SSC1_ENABLE; - I915_WRITE(PCH_DREF_CONTROL, temp); - - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); - } - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Enable CPU source on CPU attached eDP */ - if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { - if (intel_panel_use_ssc(dev_priv)) - temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - else - temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else { - /* Enable SSC on PCH eDP if needed */ - if (intel_panel_use_ssc(dev_priv)) { - DRM_ERROR("enabling SSC on PCH\n"); - temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; - } - } - I915_WRITE(PCH_DREF_CONTROL, temp); - POSTING_READ(PCH_DREF_CONTROL); - udelay(200); - } + ironlake_update_pch_refclk(dev); fp = clock.n << 16 | clock.m1 << 8 | clock.m2; if (has_reduced_clock) -- cgit v0.10.2 From 89272b8c0d427021bed70b1b83e1a16be375ccf5 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Fri, 5 Aug 2011 16:50:30 -0600 Subject: dt: add empty of_get_property for non-dt The patch adds empty function of_get_property for non-dt build, so that drivers migrating to dt can save some '#ifdef CONFIG_OF'. This also fixes the current Tegra compile problem in linux-next. 
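A brief sketch of the pattern this stub enables, using a hypothetical driver: with of_get_property() compiled in (and returning NULL) even when CONFIG_OF is unset, callers can query device-tree properties unconditionally and let an ordinary NULL check stand in for the preprocessor guard:

#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node; /* NULL without DT */
	const void *prop = NULL;

	/* Compiles with or without CONFIG_OF; no #ifdef needed here. */
	if (np)
		prop = of_get_property(np, "vendor,example-prop", NULL);

	if (prop)
		dev_info(&pdev->dev, "configured from device tree\n");
	return 0;
}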
Signed-off-by: Stephen Warren Signed-off-by: Grant Likely diff --git a/include/linux/of.h b/include/linux/of.h index 0085bb0..9180dc5 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -256,6 +256,13 @@ static inline int of_property_read_string(struct device_node *np, return -ENOSYS; } +static inline const void *of_get_property(const struct device_node *node, + const char *name, + int *lenp) +{ + return NULL; +} + #endif /* CONFIG_OF */ static inline int of_property_read_u32(const struct device_node *np, -- cgit v0.10.2 From 81107188f123e3c2217ac2f2feb2a1147904c62f Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Tue, 9 Aug 2011 13:01:32 -0500 Subject: slub: Fix partial count comparison confusion deactivate_slab() gets the comparison wrong when checking whether more than the minimum number of partial pages are on the partial list. An effect of this may be that empty pages are not freed from deactivate_slab(). The result could be an OOM due to growth of the partial slabs per node. Frees mostly occur from __slab_free, which is okay, so this would only affect use cases where a lot of switching around of per cpu slabs occurs. Switching per cpu slabs occurs with high frequency if debugging options are enabled. Reported-and-tested-by: Xiaotian Feng Signed-off-by: Christoph Lameter Signed-off-by: Pekka Enberg diff --git a/mm/slub.c b/mm/slub.c index 6da6859..9f662d7 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1854,7 +1854,7 @@ redo: new.frozen = 0; - if (!new.inuse && n->nr_partial < s->min_partial) + if (!new.inuse && n->nr_partial > s->min_partial) m = M_FREE; else if (new.freelist) { m = M_PARTIAL; -- cgit v0.10.2 From 981c1252691f4b855f2bb47ea93fb6052ea3aee2 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Tue, 9 Aug 2011 22:54:18 +0300 Subject: perf symbols: Check '/tmp/perf-' symbol file ownership The external symbol files are generated by JIT compilers, for example, but we need to make sure they're ours before injecting them into 'perf report'. Requested-by: Ingo Molnar Cc: Frederic Weisbecker Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1312919658-17158-1-git-send-email-penberg@kernel.org Signed-off-by: Pekka Enberg Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index eec1963..a8b5371 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1504,6 +1504,17 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) dso->adjust_symbols = 0; if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { + struct stat st; + + if (stat(dso->name, &st) < 0) + return -1; + + if (st.st_uid && (st.st_uid != geteuid())) { + pr_warning("File %s not owned by current user or root, " + "ignoring it.\n", dso->name); + return -1; + } + + ret = dso__load_perf_map(dso, map, filter); dso->symtab_type = ret > 0 ? SYMTAB__JAVA_JIT : SYMTAB__NOT_FOUND; -- cgit v0.10.2 From fa1bf42ff9296ac4cf211b0a1b450a6071d26a95 Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Tue, 9 Aug 2011 20:32:09 +0200 Subject: allow blk_flush_policy to return REQ_FSEQ_DATA independent of *FLUSH blk_insert_flush has the following check: /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution.
*/ if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { list_add_tail(&rq->queuelist, &q->queue_head); return; } However, blk_flush_policy will not return with policy set to only REQ_FSEQ_DATA: static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) { unsigned int policy = 0; if (fflags & REQ_FLUSH) { if (rq->cmd_flags & REQ_FLUSH) policy |= REQ_FSEQ_PREFLUSH; if (blk_rq_sectors(rq)) policy |= REQ_FSEQ_DATA; if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) policy |= REQ_FSEQ_POSTFLUSH; } return policy; } Notice that REQ_FSEQ_DATA is only set if REQ_FLUSH is set. Fix this mismatch by moving the setting of REQ_FSEQ_DATA outside of the REQ_FLUSH check. Tejun notes: Hmmm... yes, this can become a correctness issue if (and only if) blk_queue_flush() is called to change q->flush_flags while requests are in-flight; otherwise, requests wouldn't reach the function at all. Also, I think it would be a generally good idea to always set FSEQ_DATA if the request has data. Cheers, Jeff Signed-off-by: Jeff Moyer Acked-by: Tejun Heo Signed-off-by: Jens Axboe diff --git a/block/blk-flush.c b/block/blk-flush.c index bb21e4c..2d162bd 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -95,11 +95,12 @@ static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) { unsigned int policy = 0; + if (blk_rq_sectors(rq)) + policy |= REQ_FSEQ_DATA; + if (fflags & REQ_FLUSH) { if (rq->cmd_flags & REQ_FLUSH) policy |= REQ_FSEQ_PREFLUSH; - if (blk_rq_sectors(rq)) - policy |= REQ_FSEQ_DATA; if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) policy |= REQ_FSEQ_POSTFLUSH; } -- cgit v0.10.2 From f61500e000eedc0c7a0201200a7f00ba5529c002 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Thu, 4 Aug 2011 22:58:51 -0500 Subject: eCryptfs: Return error when lower file pointer is NULL When an eCryptfs inode's lower file has been closed, and the pointer has been set to NULL, return an error when trying to do a lower read or write rather than calling BUG(). 
https://bugzilla.kernel.org/show_bug.cgi?id=37292 Signed-off-by: Tyler Hicks Cc: diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 85d4309..3745f7c 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c @@ -39,15 +39,16 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, loff_t offset, size_t size) { - struct ecryptfs_inode_info *inode_info; + struct file *lower_file; mm_segment_t fs_save; ssize_t rc; - inode_info = ecryptfs_inode_to_private(ecryptfs_inode); - BUG_ON(!inode_info->lower_file); + lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; + if (!lower_file) + return -EIO; fs_save = get_fs(); set_fs(get_ds()); - rc = vfs_write(inode_info->lower_file, data, size, &offset); + rc = vfs_write(lower_file, data, size, &offset); set_fs(fs_save); mark_inode_dirty_sync(ecryptfs_inode); return rc; @@ -225,15 +226,16 @@ out: int ecryptfs_read_lower(char *data, loff_t offset, size_t size, struct inode *ecryptfs_inode) { - struct ecryptfs_inode_info *inode_info = - ecryptfs_inode_to_private(ecryptfs_inode); + struct file *lower_file; mm_segment_t fs_save; ssize_t rc; - BUG_ON(!inode_info->lower_file); + lower_file = ecryptfs_inode_to_private(ecryptfs_inode)->lower_file; + if (!lower_file) + return -EIO; fs_save = get_fs(); set_fs(get_ds()); - rc = vfs_read(inode_info->lower_file, data, size, &offset); + rc = vfs_read(lower_file, data, size, &offset); set_fs(fs_save); return rc; } -- cgit v0.10.2 From 4b6fee17b1758391281ddf5b00328035573f8be1 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Mon, 1 Aug 2011 13:33:38 +0200 Subject: eCryptfs: fix compile error This patch fixes the compile error reported at the address: https://bugzilla.kernel.org/show_bug.cgi?id=40292 The problem arises when compiling eCryptfs as built-in and the 'encrypted' key type as a module. The patch prevents this combination from being set in the kernel configuration, by fixing the eCryptfs dependencies. 
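The Kconfig idiom used by the fix is worth spelling out; the symbols below are hypothetical. For a tristate FOO, "depends on FOO || FOO=n" evaluates to y when FOO is built-in or absent but to m when FOO is a module, which caps the depending symbol at m and so rules out exactly the broken combination of a built-in user and a modular dependency:

config EXAMPLE_FS
	tristate "Example filesystem (hypothetical)"
	depends on EXAMPLE_KEYS || EXAMPLE_KEYS=n
	# EXAMPLE_KEYS=y: y || n = y, so EXAMPLE_FS may be y or m
	# EXAMPLE_KEYS=m: m || n = m, so EXAMPLE_FS is limited to m or n
	# EXAMPLE_KEYS=n: n || y = y, so no restriction when disabled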
Signed-off-by: Roberto Sassu Reported-by: David Hill Signed-off-by: Tyler Hicks diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig index 1cd6d9d..cc16562 100644 --- a/fs/ecryptfs/Kconfig +++ b/fs/ecryptfs/Kconfig @@ -1,6 +1,6 @@ config ECRYPT_FS tristate "eCrypt filesystem layer support (EXPERIMENTAL)" - depends on EXPERIMENTAL && KEYS && CRYPTO + depends on EXPERIMENTAL && KEYS && CRYPTO && (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n) select CRYPTO_ECB select CRYPTO_CBC select CRYPTO_MD5 -- cgit v0.10.2 From 99b373ff2d1246f64b97a3d449a2fd6018d504e6 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Fri, 5 Aug 2011 04:15:19 -0500 Subject: eCryptfs: Fix payload_len uninitialized variable warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fs/ecryptfs/keystore.c: In function ‘ecryptfs_generate_key_packet_set’: fs/ecryptfs/keystore.c:1991:28: warning: ‘payload_len’ may be used uninitialized in this function [-Wuninitialized] fs/ecryptfs/keystore.c:1976:9: note: ‘payload_len’ was declared here Signed-off-by: Tyler Hicks diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 08a2b52..ac1ad48 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1973,7 +1973,7 @@ pki_encrypt_session_key(struct key *auth_tok_key, { struct ecryptfs_msg_ctx *msg_ctx = NULL; char *payload = NULL; - size_t payload_len; + size_t payload_len = 0; struct ecryptfs_message *msg; int rc; -- cgit v0.10.2 From 88ff98775885d72618cbfc5ed6b865593cb66891 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 9 Aug 2011 12:36:00 -0700 Subject: [IA64] fix "allnoconfig" build Link errors: arch/ia64/kernel/built-in.o: In function `arch_setup_dmar_msi': (.text+0x35972): undefined reference to `dmar_msi_write' ... and more ... because allnoconfig has CONFIG_DMAR=y due to the "select DMAR" in arch/ia64/Kconfig under config IA64_GENERIC. Drop that select, but add CONFIG_DMAR=y to generic_defconfig so we keep testbuilding the DMAR code. Signed-off-by: Tony Luck diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 1248547..3ff7785 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -162,7 +162,6 @@ config IA64_GENERIC select ACPI_NUMA select SWIOTLB select PCI_MSI - select DMAR help This selects the system type of your hardware. A "generic" kernel will run on any supported IA-64 system. However, if you configure diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index 1d7bca0..0e5cd14 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -234,3 +234,4 @@ CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y CONFIG_MISC_DEVICES=y +CONFIG_DMAR=y -- cgit v0.10.2 From 5b36c9b4a9872f7852b8ce7cffbfc2f6d6c25371 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Sat, 23 Jul 2011 13:57:33 +0200 Subject: b43: read correct register on bcma bus. This causes a databus error on a Broadcom SoC using bcma. Signed-off-by: Hauke Mehrtens Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 83cba22..481e534 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -795,9 +795,23 @@ static u64 supported_dma_mask(struct b43_wldev *dev) u32 tmp; u16 mmio_base; - tmp = b43_read32(dev, SSB_TMSHIGH); - if (tmp & SSB_TMSHIGH_DMA64) - return DMA_BIT_MASK(64); + switch (dev->dev->bus_type) { +#ifdef CONFIG_B43_BCMA + case B43_BUS_BCMA: + tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST); + if (tmp & BCMA_IOST_DMA64) + return DMA_BIT_MASK(64); + break; +#endif +#ifdef CONFIG_B43_SSB + case B43_BUS_SSB: + tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH); + if (tmp & SSB_TMSHIGH_DMA64) + return DMA_BIT_MASK(64); + break; +#endif + } + mmio_base = b43_dmacontroller_base(0, 0); b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK); tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL); -- cgit v0.10.2 From 15052f81d255eac44e745bc630b36aa86779ad9d Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Fri, 29 Jul 2011 17:38:15 +0530 Subject: ath9k_hw: Fix incorrect Tx control power in AR9003 template The CTL power data is incorrect in the ctlPowerData_2G field of ar9300_eeprom. Setting incorrect CTL power during calibration causes lower tx power. Tx power was reported as 3dBm while operating in channel 6 HT40+ or in channel 11 HT40- because the CTL powers in the calibration are set to zero. Cc: stable@kernel.org Signed-off-by: Rajkumar Manoharan Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index d109c25..6cfc9eb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -307,7 +307,7 @@ static const struct ar9300_eeprom ar9300_default = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, - { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, + { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, @@ -884,7 +884,7 @@ static const struct ar9300_eeprom ar9300_x113 = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, - { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, + { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, @@ -2040,7 +2040,7 @@ static const struct ar9300_eeprom ar9300_x112 = { { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, - { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, + { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, -- cgit v0.10.2 From 1fa707aa3e5c43b094983787267d80d0dfa44e4d Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Fri, 29 Jul 2011 17:38:17 +0530 Subject: ath9k_hw: update PMU to improve ripple issue for AR9485 The commit ebefce3d13f8b5a871337ff7c3821ee140c1ea8a failed to set the proper PMU value to address the ripple issue for AR9485. Cc: stable@kernel.org Signed-off-by: Rajkumar Manoharan Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 6cfc9eb..c34bef1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3734,7 +3734,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) } } else { reg_pmu_set = (5 << 1) | (7 << 4) | - (1 << 8) | (2 << 14) | + (2 << 8) | (2 << 14) | (6 << 17) | (1 << 20) | (3 << 24) | (1 << 28); } -- cgit v0.10.2 From 03f18fa16cd805916a56d027b4ee52ba26e6d6ab Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Tue, 2 Aug 2011 16:52:33 -0500 Subject: rtlwifi: rtl892cu: New USB IDs This patch fixes several problems in the USB_DEVICE table, including missing IDs, reversed vendor/product codes, and a duplicate ID. Signed-off-by: Larry Finger Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 942f7a3..ef63c0d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -281,6 +281,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, /* 8188CE-VAU USB minCard (b/g mode only) */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, + /* 8188RU in Alfa AWUS036NHR */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)}, /* 8188 Combo for BC4 */ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, @@ -303,20 +305,23 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ /* HP - Lite-On ,8188CUS Slim Combo */ {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */ {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ - {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ + {RTL_USB_DEVICE(0x13d3, 0x3358, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ /* Russian customer -Azwave (8188CE-VAU b/g mode only) */ - {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(0x13d3, 0x3359, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(0x4855, 0x0090, rtl92cu_hal_cfg)}, /* Feixun */ + {RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */ + {RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */ /****** 8192CU ********/ {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ - {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/ {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ -- cgit v0.10.2 From b8b1ec61c006ed1d3104093556544e858cc2ddd1 Mon Sep 17 00:00:00 2001 From: Ivo van Doorn Date: Wed, 3 Aug 2011 21:09:49 +0200 Subject: rt2x00: Add new rt73 buffalo USB id Reported-by: Maik-Holger Freudenberg Signed-off-by: Ivo van Doorn Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 6a93939..0baeb89 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2420,6 +2420,7 @@ static struct usb_device_id rt73usb_device_table[] = { /* Buffalo */ { USB_DEVICE(0x0411, 0x00d8) }, { USB_DEVICE(0x0411, 0x00d9) }, + { USB_DEVICE(0x0411, 0x00e6) }, { USB_DEVICE(0x0411, 0x00f4) }, { USB_DEVICE(0x0411, 0x0116) }, { USB_DEVICE(0x0411, 0x0119) }, -- cgit v0.10.2 From 118c9db51e7acaf8f16deae8311cce6588b83e31 Mon Sep 17 00:00:00 2001 From: Alex Hacker Date: Thu, 4 Aug 2011 13:47:32 +0600 Subject: ath9k: fix a misprint which leads to incorrect calibration This patch addresses an issue with the incorrect HW register AR_PHY_TX_IQCAL_CORR_COEFF_B1 definition which leads to incorrect calibration. Cc: stable@kernel.org Signed-off-by: Alex Hacker Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 6de3f0b..5c59042 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -850,7 +850,7 @@ #define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) #define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) #define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) -#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2)) +#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2)) /* * Channel 2 Register Map -- cgit v0.10.2 From 276b02e2a0ada2e0196852c312560ffdabcebddc Mon Sep 17 00:00:00 2001 From: Anthony Bourguignon Date: Fri, 5 Aug 2011 07:46:32 +0200 Subject: rt2x00: Add rt2870 device id for Dvico usb key This patch adds a device id for the wifi usb keys shipped by DVICO with some of their tvix hardware. Signed-off-by: Anthony Bourguignon Acked-by: Ivo van Doorn Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 5075593..9395631 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -921,6 +921,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x07d1, 0x3c16) }, /* Draytek */ { USB_DEVICE(0x07fa, 0x7712) }, + /* DVICO */ + { USB_DEVICE(0x0fe9, 0xb307) }, /* Edimax */ { USB_DEVICE(0x7392, 0x7711) }, { USB_DEVICE(0x7392, 0x7717) }, -- cgit v0.10.2 From bdc71bc59231f5542af13b5061b9ab124d093050 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Sun, 7 Aug 2011 19:36:07 -0400 Subject: ath5k: fix error handling in ath5k_beacon_send This cleans up error handling for the beacon in case of dma mapping failure. We need to free the skb when dma mapping fails instead of nulling and leaking the pointer, and we should bail out to avoid giving the hardware the bad descriptor. Finally, we need to perform the null check after trying to update the beacon, or else beacons will never be sent after a single mapping failure. Cc: stable@kernel.org Signed-off-by: Bob Copeland Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index f54dff4..c3119a6 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1735,6 +1735,8 @@ ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf) if (dma_mapping_error(ah->dev, bf->skbaddr)) { ATH5K_ERR(ah, "beacon DMA mapping failed\n"); + dev_kfree_skb_any(skb); + bf->skb = NULL; return -EIO; } @@ -1819,8 +1821,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) ath5k_txbuf_free_skb(ah, avf->bbuf); avf->bbuf->skb = skb; ret = ath5k_beacon_setup(ah, avf->bbuf); - if (ret) - avf->bbuf->skb = NULL; out: return ret; } @@ -1840,6 +1840,7 @@ ath5k_beacon_send(struct ath5k_hw *ah) struct ath5k_vif *avf; struct ath5k_buf *bf; struct sk_buff *skb; + int err; ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n"); @@ -1888,11 +1889,6 @@ ath5k_beacon_send(struct ath5k_hw *ah) avf = (void *)vif->drv_priv; bf = avf->bbuf; - if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION || - ah->opmode == NL80211_IFTYPE_MONITOR)) { - ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); - return; - } /* * Stop any current dma and put the new frame on the queue. @@ -1906,8 +1902,17 @@ ath5k_beacon_send(struct ath5k_hw *ah) /* refresh the beacon for AP or MESH mode */ if (ah->opmode == NL80211_IFTYPE_AP || - ah->opmode == NL80211_IFTYPE_MESH_POINT) - ath5k_beacon_update(ah->hw, vif); + ah->opmode == NL80211_IFTYPE_MESH_POINT) { + err = ath5k_beacon_update(ah->hw, vif); + if (err) + return; + } + + if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION || + ah->opmode == NL80211_IFTYPE_MONITOR)) { + ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb); + return; + } trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]); -- cgit v0.10.2 From e44f4112a46ca817fe2758aac7bf7893a64a8c0e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 22 Jul 2011 16:04:41 +0000 Subject: xfs: set cursor in xfs_ail_splice() even when AIL was empty In xfs_ail_splice(), if a cursor is provided it is updated to point to the last item on the list being spliced into the AIL. But if the AIL was found to be empty, the cursor (if provided) is just initialized instead. There is no reason the empty AIL case needs to be treated any differently. And treating it the same way allows this code to be rearranged a bit, with a somewhat tidier result. Signed-off-by: Alex Elder Reviewed-by: Dave Chinner diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 43233e9..c15aa29 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -299,7 +299,7 @@ xfs_trans_ail_cursor_last( * Splice the log item list into the AIL at the given LSN. We splice to the * tail of the given LSN to maintain insert order for push traversals. The * cursor is optional, allowing repeated updates to the same LSN to avoid - * repeated traversals. + * repeated traversals. This should not be called with an empty list. */ static void xfs_ail_splice( @@ -308,50 +308,39 @@ xfs_ail_splice( struct list_head *list, xfs_lsn_t lsn) { - struct xfs_log_item *lip = cur ? cur->item : NULL; - struct xfs_log_item *next_lip; + struct xfs_log_item *lip; + + ASSERT(!list_empty(list)); /* - * Get a new cursor if we don't have a placeholder or the existing one - * has been invalidated. + * Use the cursor to determine the insertion point if one is + * provided. If not, or if the one we got is not valid, + * find the place in the AIL where the items belong. 
*/ - if (!lip || (__psint_t)lip & 1) { + lip = cur ? cur->item : NULL; + if (!lip || (__psint_t) lip & 1) lip = __xfs_trans_ail_cursor_last(ailp, lsn); - if (!lip) { - /* The list is empty, so just splice and return. */ - if (cur) - cur->item = NULL; - list_splice(list, &ailp->xa_ail); - return; - } - } + /* + * If a cursor is provided, we know we're processing the AIL + * in lsn order, and future items to be spliced in will + * follow the last one being inserted now. Update the + * cursor to point to that last item, now while we have a + * reliable pointer to it. + */ + if (cur) + cur->item = list_entry(list->prev, struct xfs_log_item, li_ail); /* - * Our cursor points to the item we want to insert _after_, so we have - * to update the cursor to point to the end of the list we are splicing - * in so that it points to the correct location for the next splice. - * i.e. before the splice - * - * lsn -> lsn -> lsn + x -> lsn + x ... - * ^ - * | cursor points here - * - * After the splice we have: - * - * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ... - * ^ ^ - * | cursor points here | needs to move here - * - * So we set the cursor to the last item in the list to be spliced - * before we execute the splice, resulting in the cursor pointing to - * the correct item after the splice occurs. + * Finally perform the splice. Unless the AIL was empty, + * lip points to the item in the AIL _after_ which the new + * items should go. If lip is null the AIL was empty, so + * the new items go at the head of the AIL. */ - if (cur) { - next_lip = list_entry(list->prev, struct xfs_log_item, li_ail); - cur->item = next_lip; - } - list_splice(list, &lip->li_ail); + if (lip) + list_splice(list, &lip->li_ail); + else + list_splice(list, &ailp->xa_ail); } /* @@ -682,6 +671,7 @@ xfs_trans_ail_update_bulk( int i; LIST_HEAD(tmp); + ASSERT(nr_items > 0); /* Not required, but true. */ mlip = xfs_ail_min(ailp); for (i = 0; i < nr_items; i++) { @@ -701,7 +691,8 @@ xfs_trans_ail_update_bulk( list_add(&lip->li_ail, &tmp); } - xfs_ail_splice(ailp, cur, &tmp, lsn); + if (!list_empty(&tmp)) + xfs_ail_splice(ailp, cur, &tmp, lsn); if (!mlip_changed) { spin_unlock(&ailp->xa_lock); -- cgit v0.10.2 From 9f50fad65b87a8776ae989ca059ad6c17925dfc3 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 9 Aug 2011 11:56:26 +0200 Subject: Revert "memcg: get rid of percpu_charge_mutex lock" This reverts commit 8521fc50d433507a7cdc96bec280f9e5888a54cc. The patch incorrectly assumes that using atomic FLUSHING_CACHED_CHARGE bit operations is sufficient but that is not true. Johannes Weiner has reported a crash during parallel memory cgroup removal: BUG: unable to handle kernel NULL pointer dereference at 0000000000000018 IP: [] css_is_ancestor+0x20/0x70 Oops: 0000 [#1] PREEMPT SMP Pid: 19677, comm: rmdir Tainted: G W 3.0.0-mm1-00188-gf38d32b #35 ECS MCP61M-M3/MCP61M-M3 RIP: 0010:[] css_is_ancestor+0x20/0x70 RSP: 0018:ffff880077b09c88 EFLAGS: 00010202 Process rmdir (pid: 19677, threadinfo ffff880077b08000, task ffff8800781bb310) Call Trace: [] mem_cgroup_same_or_subtree+0x33/0x40 [] drain_all_stock+0x11f/0x170 [] mem_cgroup_force_empty+0x231/0x6d0 [] mem_cgroup_pre_destroy+0x14/0x20 [] cgroup_rmdir+0xb9/0x500 [] vfs_rmdir+0x86/0xe0 [] do_rmdir+0xfb/0x110 [] sys_rmdir+0x16/0x20 [] system_call_fastpath+0x16/0x1b We are crashing because we try to dereference cached memcg when we are checking whether we should wait for draining on the cache. The cache is already cleaned up, though. 
There is also a theoretical chance that the cached memcg gets freed between the time we test for the FLUSHING_CACHED_CHARGE bit and the time we dereference it in mem_cgroup_same_or_subtree: CPU0 CPU1 CPU2 mem=stock->cached stock->cached=NULL clear_bit test_and_set_bit test_bit() ... mem_cgroup_destroy use after free The percpu_charge_mutex protected against this race because sync draining is exclusive. It is safer to revert now and come up with a more parallel implementation later. Signed-off-by: Michal Hocko Reported-by: Johannes Weiner Acked-by: Johannes Weiner Acked-by: KAMEZAWA Hiroyuki Cc: stable@kernel.org Signed-off-by: Linus Torvalds diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f4ec4e7..930de94 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2091,6 +2091,7 @@ struct memcg_stock_pcp { #define FLUSHING_CACHED_CHARGE (0) }; static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); +static DEFINE_MUTEX(percpu_charge_mutex); /* * Try to consume stocked charge on this cpu. If success, one page is consumed @@ -2197,8 +2198,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); - if (mem_cgroup_same_or_subtree(root_mem, stock->cached) && - test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) + if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) flush_work(&stock->work); } out: @@ -2213,14 +2213,22 @@ out: */ static void drain_all_stock_async(struct mem_cgroup *root_mem) { + /* + * If someone calls draining, avoid adding more kworker runs. + */ + if (!mutex_trylock(&percpu_charge_mutex)) + return; drain_all_stock(root_mem, false); + mutex_unlock(&percpu_charge_mutex); } /* This is a synchronous drain interface. */ static void drain_all_stock_sync(struct mem_cgroup *root_mem) { /* called when force_empty is called */ + mutex_lock(&percpu_charge_mutex); drain_all_stock(root_mem, true); + mutex_unlock(&percpu_charge_mutex); } /* -- cgit v0.10.2 From f2c0d0266cc5eb36a4aa44944b4096ec121490aa Mon Sep 17 00:00:00 2001 From: Jonathan Nieder Date: Mon, 8 Aug 2011 06:22:43 +0200 Subject: cap_syslog: don't use WARN_ONCE for CAP_SYS_ADMIN deprecation warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit syslog-ng versions before 3.3.0beta1 (2011-05-12) assume that CAP_SYS_ADMIN is sufficient to access syslog, so ever since CAP_SYSLOG was introduced (2010-11-25) they have triggered a warning. Commit ee24aebffb75 ("cap_syslog: accept CAP_SYS_ADMIN for now") improved matters a little by making syslog-ng work again, just keeping the WARN_ONCE(). But still, this is a warning that writes a stack trace we don't care about to syslog, sets a taint flag, and alarms sysadmins when nothing worse has happened than use of an old userspace with a recent kernel. Convert the WARN_ONCE to a printk_once to avoid that while continuing to give userspace developers a hint that this is an unwanted backward-compatibility feature and won't be around forever.
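The practical difference is easy to see in a reduced sketch (hypothetical driver code, not the actual printk.c change): WARN_ONCE() dumps a one-time stack trace and sets the TAINT_WARN flag, while printk_once() emits a single ordinary log line and leaves the kernel untainted.

    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    static void deprecated_cap_warning(void)
    {
            /* Old style: one-time stack dump plus kernel taint. */
            WARN_ONCE(1, "deprecated capability used\n");

            /*
             * Quieter replacement: one plain warning line per boot
             * that still identifies the offending process.
             */
            printk_once(KERN_WARNING "%s (%d): deprecated capability used\n",
                        current->comm, task_pid_nr(current));
    }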
Reported-by: Ralf Hildebrandt Reported-by: Niels Reported-by: Paweł Sikora Signed-off-by: Jonathan Nieder Liked-by: Gergely Nagy Acked-by: Serge Hallyn Acked-by: James Morris Signed-off-by: Linus Torvalds diff --git a/kernel/printk.c b/kernel/printk.c index 37dff34..836a2ae 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -318,8 +318,10 @@ static int check_syslog_permissions(int type, bool from_file) return 0; /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */ if (capable(CAP_SYS_ADMIN)) { - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN " - "but no CAP_SYSLOG (deprecated).\n"); + printk_once(KERN_WARNING "%s (%d): " + "Attempt to access syslog with CAP_SYS_ADMIN " + "but no CAP_SYSLOG (deprecated).\n", + current->comm, task_pid_nr(current)); return 0; } return -EPERM; -- cgit v0.10.2 From 764355487ea220fdc2faf128d577d7f679b91f97 Mon Sep 17 00:00:00 2001 From: John Johansen Date: Fri, 22 Jul 2011 08:14:15 -0700 Subject: Ecryptfs: Add mount option to check uid of device being mounted = expect uid Close a TOCTOU race for mounts done via ecryptfs-mount-private. The mount source (device) can be raced when the ownership test is done in userspace. Provide Ecryptfs a means to force the uid check at mount time. Signed-off-by: John Johansen Cc: Signed-off-by: Tyler Hicks diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 9f1bb74..b4a6bef 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -175,6 +175,7 @@ enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_encrypted_view, ecryptfs_opt_fnek_sig, ecryptfs_opt_fn_cipher, ecryptfs_opt_fn_cipher_key_bytes, ecryptfs_opt_unlink_sigs, ecryptfs_opt_mount_auth_tok_only, + ecryptfs_opt_check_dev_ruid, ecryptfs_opt_err }; static const match_table_t tokens = { @@ -191,6 +192,7 @@ static const match_table_t tokens = { {ecryptfs_opt_fn_cipher_key_bytes, "ecryptfs_fn_key_bytes=%u"}, {ecryptfs_opt_unlink_sigs, "ecryptfs_unlink_sigs"}, {ecryptfs_opt_mount_auth_tok_only, "ecryptfs_mount_auth_tok_only"}, + {ecryptfs_opt_check_dev_ruid, "ecryptfs_check_dev_ruid"}, {ecryptfs_opt_err, NULL} }; @@ -236,6 +238,7 @@ static void ecryptfs_init_mount_crypt_stat( * ecryptfs_parse_options * @sb: The ecryptfs super block * @options: The options passed to the kernel + * @check_ruid: set to 1 if device uid should be checked against the ruid * * Parse mount options: * debug=N - ecryptfs_verbosity level for debug output @@ -251,7 +254,8 @@ static void ecryptfs_init_mount_crypt_stat( * * Returns zero on success; non-zero on error */ -static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) +static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, + uid_t *check_ruid) { char *p; int rc = 0; @@ -276,6 +280,8 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) char *cipher_key_bytes_src; char *fn_cipher_key_bytes_src; + *check_ruid = 0; + if (!options) { rc = -EINVAL; goto out; @@ -380,6 +386,9 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options) mount_crypt_stat->flags |= ECRYPTFS_GLOBAL_MOUNT_AUTH_TOK_ONLY; break; + case ecryptfs_opt_check_dev_ruid: + *check_ruid = 1; + break; case ecryptfs_opt_err: default: printk(KERN_WARNING @@ -475,6 +484,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags const char *err = "Getting sb failed"; struct inode *inode; struct path path; + uid_t check_ruid; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); @@ -483,7 +493,7 @@ static 
struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } - rc = ecryptfs_parse_options(sbi, raw_data); + rc = ecryptfs_parse_options(sbi, raw_data, &check_ruid); if (rc) { err = "Error parsing options"; goto out; @@ -521,6 +531,15 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags "known incompatibilities\n"); goto out_free; } + + if (check_ruid && path.dentry->d_inode->i_uid != current_uid()) { + rc = -EPERM; + printk(KERN_ERR "Mount of device (uid: %d) not owned by " + "requested user (uid: %d)\n", + path.dentry->d_inode->i_uid, current_uid()); + goto out_free; + } + ecryptfs_set_superblock_lower(s, path.dentry->d_sb); s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; -- cgit v0.10.2 From 9be6dd6510fde5cfa2ab73f238754d38ee6797bc Mon Sep 17 00:00:00 2001 From: Andrei Warkentin Date: Fri, 5 Aug 2011 11:04:10 +0000 Subject: Bridge: Always send NETDEV_CHANGEADDR up on br MAC change. This ensures the neighbor entries associated with the bridge dev are flushed, also invalidating the associated cached L2 headers. This means we can br_add_if/br_del_if ports to implement hand-over and not wind up with bridge packets going out with a stale MAC. This means we can also change the MAC of a port device and likewise not wind up with bridge packets going out with a stale MAC. This builds on Stephen Hemminger's patch, also handling the br_del_if case and the port MAC change case. Cc: Stephen Hemminger Signed-off-by: Andrei Warkentin Acked-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 3176e2e..2cdf007 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -417,6 +417,7 @@ put_back: int br_del_if(struct net_bridge *br, struct net_device *dev) { struct net_bridge_port *p; + bool changed_addr; p = br_port_get_rtnl(dev); if (!p || p->br != br) @@ -425,9 +426,12 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) del_nbp(p); spin_lock_bh(&br->lock); - br_stp_recalculate_bridge_id(br); + changed_addr = br_stp_recalculate_bridge_id(br); spin_unlock_bh(&br->lock); + if (changed_addr) + call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); + netdev_update_features(br->dev); return 0; diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c index 6545ee9..a76b621 100644 --- a/net/bridge/br_notify.c +++ b/net/bridge/br_notify.c @@ -34,6 +34,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v struct net_device *dev = ptr; struct net_bridge_port *p; struct net_bridge *br; + bool changed_addr; int err; /* register of bridge completed, add sysfs entries */ @@ -57,8 +58,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v case NETDEV_CHANGEADDR: spin_lock_bh(&br->lock); br_fdb_changeaddr(p, dev->dev_addr); - br_stp_recalculate_bridge_id(br); + changed_addr = br_stp_recalculate_bridge_id(br); spin_unlock_bh(&br->lock); + + if (changed_addr) + call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); + break; case NETDEV_CHANGE: -- cgit v0.10.2 From 3557619f0f6f7496ed453d4825e24958ab1884e0 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 9 Aug 2011 02:04:43 +0000 Subject: net_sched: prio: use qdisc_dequeue_peeked commit 07bd8df5df4369487812bf85a237322ff3569b77 (sch_sfq: fix peek() implementation) changed sfq to use generic peek helper.
This makes HFSC complain about a non-work-conserving child qdisc, if prio with sfq child is used within hfsc: hfsc peeks into prio qdisc, which will then peek into sfq. The returned skb is stashed in sch->gso_skb. Next, hfsc tries to dequeue from prio, but prio will call sfq dequeue directly, which may return NULL instead of the previously peeked-at skb. Have prio call qdisc_dequeue_peeked, so sfq->dequeue() is not called in this case. Cc: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S. Miller diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 2a318f2..b5d56a2 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -112,7 +112,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) for (prio = 0; prio < q->bands; prio++) { struct Qdisc *qdisc = q->queues[prio]; - struct sk_buff *skb = qdisc->dequeue(qdisc); + struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); if (skb) { qdisc_bstats_update(sch, skb); sch->q.qlen--; -- cgit v0.10.2 From 511d8cf0ab3d2e4ec3f3f672b06a83f17874b83b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 10 Aug 2011 09:41:26 +0900 Subject: ASoC: Fix typo in wm8750 spi_ids Signed-off-by: Mark Brown Reported-by: Stephen Rothwell diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c index 65fe78a..e6f47f4 100644 --- a/sound/soc/codecs/wm8750.c +++ b/sound/soc/codecs/wm8750.c @@ -782,7 +782,7 @@ static const struct spi_device_id wm8750_spi_ids[] = { { "wm8750", 0 }, { "wm8987", 0 }, }; -MODULE_DEVICE_TABLE(spi, wm8750_spi_id); +MODULE_DEVICE_TABLE(spi, wm8750_spi_ids); static struct spi_driver wm8750_spi_driver = { .driver = { -- cgit v0.10.2 From c9c9e4e4252c9d554222906e4a843efd27c0ac96 Mon Sep 17 00:00:00 2001 From: Kazutomo Yoshii Date: Tue, 9 Aug 2011 23:39:13 -0500 Subject: ALSA: usb-audio - Add quirk for BOSS Micro BR-80 Signed-off-by: Kazutomo Yoshii Signed-off-by: Takashi Iwai diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 4d4f865..a42e3ef 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -1707,6 +1707,40 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + USB_DEVICE(0x0582, 0x0130), + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { + /* .vendor_name = "BOSS", */ + /* .product_name = "MICRO BR-80", */ + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_IGNORE_INTERFACE + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE + }, + { + .ifnum = 3, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = & (const struct snd_usb_midi_endpoint_info) { + .out_cables = 0x0001, + .in_cables = 0x0001 + } + }, + { + .ifnum = -1 + } + } + } +}, /* Guillemot devices */ { -- cgit v0.10.2 From d12d1fcafa8115602a8ce0c4a9256c7d3abdb5e1 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 9 Aug 2011 15:36:50 +0300 Subject: OMAP: Fix linking error in twl-common.c for OMAP2/3/4 only builds Commit b22f954 (OMAP4: Move common twl6030 configuration to twl-common) caused compile failures for code for an OMAP arch which is not selected by the config.
Fixes issues like: With CONFIG_ARCH_OMAP3=y and CONFIG_ARCH_OMAP4=n, I'm getting this: arch/arm/mach-omap2/built-in.o:(.data+0xf99c): undefined reference to `omap4430_phy_init' arch/arm/mach-omap2/built-in.o:(.data+0xf9a0): undefined reference to `omap4430_phy_exit' arch/arm/mach-omap2/built-in.o:(.data+0xf9a4): undefined reference to `omap4430_phy_power' arch/arm/mach-omap2/built-in.o:(.data+0xf9a8): undefined reference to `omap4430_phy_set_clk' arch/arm/mach-omap2/built-in.o:(.data+0xf9ac): undefined reference to `omap4430_phy_suspend' Fix the problem by moving the code to ifdef sections for omap3 and omap4. Signed-off-by: Peter Ujfalusi [tony@atomide.com: updated comments] Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 2543342..daa056e 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c @@ -48,14 +48,7 @@ void __init omap_pmic_init(int bus, u32 clkrate, omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); } -static struct twl4030_usb_data omap4_usb_pdata = { - .phy_init = omap4430_phy_init, - .phy_exit = omap4430_phy_exit, - .phy_power = omap4430_phy_power, - .phy_set_clock = omap4430_phy_set_clk, - .phy_suspend = omap4430_phy_suspend, -}; - +#if defined(CONFIG_ARCH_OMAP3) static struct twl4030_usb_data omap3_usb_pdata = { .usb_mode = T2_USB_MODE_ULPI, }; @@ -122,6 +115,45 @@ static struct regulator_init_data omap3_vpll2_idata = { .consumer_supplies = omap3_vpll2_supplies, }; +void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, + u32 pdata_flags, u32 regulators_flags) +{ + if (!pmic_data->irq_base) + pmic_data->irq_base = TWL4030_IRQ_BASE; + if (!pmic_data->irq_end) + pmic_data->irq_end = TWL4030_IRQ_END; + + /* Common platform data configurations */ + if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) + pmic_data->usb = &omap3_usb_pdata; + + if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) + pmic_data->bci = &omap3_bci_pdata; + + if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) + pmic_data->madc = &omap3_madc_pdata; + + if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) + pmic_data->audio = &omap3_audio_pdata; + + /* Common regulator configurations */ + if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) + pmic_data->vdac = &omap3_vdac_idata; + + if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) + pmic_data->vpll2 = &omap3_vpll2_idata; +} +#endif /* CONFIG_ARCH_OMAP3 */ + +#if defined(CONFIG_ARCH_OMAP4) +static struct twl4030_usb_data omap4_usb_pdata = { + .phy_init = omap4430_phy_init, + .phy_exit = omap4430_phy_exit, + .phy_power = omap4430_phy_power, + .phy_set_clock = omap4430_phy_set_clk, + .phy_suspend = omap4430_phy_suspend, +}; + static struct regulator_init_data omap4_vdac_idata = { .constraints = { .min_uV = 1800000, @@ -273,32 +305,4 @@ void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data, !pmic_data->clk32kg) pmic_data->clk32kg = &omap4_clk32kg_idata; } - -void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, - u32 pdata_flags, u32 regulators_flags) -{ - if (!pmic_data->irq_base) - pmic_data->irq_base = TWL4030_IRQ_BASE; - if (!pmic_data->irq_end) - pmic_data->irq_end = TWL4030_IRQ_END; - - /* Common platform data configurations */ - if (pdata_flags & TWL_COMMON_PDATA_USB && !pmic_data->usb) - pmic_data->usb = &omap3_usb_pdata; - - if (pdata_flags & TWL_COMMON_PDATA_BCI && !pmic_data->bci) - pmic_data->bci = 
&omap3_bci_pdata; - - if (pdata_flags & TWL_COMMON_PDATA_MADC && !pmic_data->madc) - pmic_data->madc = &omap3_madc_pdata; - - if (pdata_flags & TWL_COMMON_PDATA_AUDIO && !pmic_data->audio) - pmic_data->audio = &omap3_audio_pdata; - - /* Common regulator configurations */ - if (regulators_flags & TWL_COMMON_REGULATOR_VDAC && !pmic_data->vdac) - pmic_data->vdac = &omap3_vdac_idata; - - if (regulators_flags & TWL_COMMON_REGULATOR_VPLL2 && !pmic_data->vpll2) - pmic_data->vpll2 = &omap3_vpll2_idata; -} +#endif /* CONFIG_ARCH_OMAP4 */ -- cgit v0.10.2 From c9a48c2aac235f7a7e31fb7214a33afcd0da26b7 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Wed, 10 Aug 2011 00:57:42 -0600 Subject: OMAP: hwmod: fix build break on non-OMAP4 multi-OMAP2 builds Builds for multi-OMAP2 (e.g., OMAP2420 with OMAP2430) with CONFIG_ARCH_OMAP4=n fail with the following errors: arch/arm/mach-omap2/built-in.o: In function `_enable_module': arch/arm/mach-omap2/omap_hwmod.c:701: undefined reference to `omap4_cminst_module_enable' arch/arm/mach-omap2/built-in.o: In function `_disable_module': arch/arm/mach-omap2/omap_hwmod.c:726: undefined reference to `omap4_cminst_module_disable' arch/arm/mach-omap2/built-in.o: In function `_wait_target_disable': arch/arm/mach-omap2/omap_hwmod.c:1179: undefined reference to `omap4_cminst_wait_module_idle' This is probably due to the preprocessor directives in arch/arm/plat-omap/include/plat/cpu.h that convert some cpu_is_omap*() expressions from preprocessor directives into something that is only resolvable during runtime, if multiple OMAP2 build targets are selected. Thanks to Tony Lindgren for reporting. Signed-off-by: Paul Walmsley Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h index f2ea645..a018a73 100644 --- a/arch/arm/mach-omap2/cminst44xx.h +++ b/arch/arm/mach-omap2/cminst44xx.h @@ -18,13 +18,36 @@ extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs); extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs); extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); -extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); + +# ifdef CONFIG_ARCH_OMAP4 +extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, + u16 clkctrl_offs); extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs); +# else + +static inline int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, + u16 clkctrl_offs) +{ + return 0; +} + +static inline void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, + s16 cdoffs, u16 clkctrl_offs) +{ +} + +static inline void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs, + u16 clkctrl_offs) +{ +} + +# endif + /* * In an ideal world, we would not export these low-level functions, * but this will probably take some time to fix properly -- cgit v0.10.2 From 96b635977984a88ecdb9cc76b8a54db7297f36e0 Mon Sep 17 00:00:00 2001 From: Wang Shaoyan Date: Wed, 10 Aug 2011 16:01:04 +0800 Subject: ALSA: hda - Add CONFIG_SND_HDA_POWER_SAVE to stac_vrefout_set() Commit 45eebda7 added the new function stac_vrefout_set, but it is only used in code guarded by the CONFIG_SND_HDA_POWER_SAVE macro, so add the same guard to avoid this warning: sound/pci/hda/patch_sigmatel.c:676:12: warning: 'stac_vrefout_set' defined but not used Signed-off-by: Wang Shaoyan Signed-off-by:
Takashi Iwai diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index aa376b5..5145b66 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -673,6 +673,7 @@ static int stac92xx_smux_enum_put(struct snd_kcontrol *kcontrol, return 0; } +#ifdef CONFIG_SND_HDA_POWER_SAVE static int stac_vrefout_set(struct hda_codec *codec, hda_nid_t nid, unsigned int new_vref) { @@ -696,6 +697,7 @@ static int stac_vrefout_set(struct hda_codec *codec, return 1; } +#endif static unsigned int stac92xx_vref_set(struct hda_codec *codec, hda_nid_t nid, unsigned int new_vref) -- cgit v0.10.2 From 4eb979d4d182c67acb6272a3a0244bf0027cf16b Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 10 Aug 2011 10:17:07 +0100 Subject: ARM: drop experimental status for ARM_PATCH_PHYS_VIRT This has now been well tested, and several platforms are now selecting this directly. It's time to drop its experimental status. Signed-off-by: Russell King diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2c71a8f..5ebc5d9 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -195,8 +195,7 @@ config VECTORS_BASE The base address of exception vectors. config ARM_PATCH_PHYS_VIRT - bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)" - depends on EXPERIMENTAL + bool "Patch physical to virtual translations at runtime" depends on !XIP_KERNEL && MMU depends on !ARCH_REALVIEW || !SPARSEMEM help -- cgit v0.10.2 From a5a3973da8b52944bc5909852714e55771c31ce7 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 10 Aug 2011 11:49:04 +0200 Subject: ALSA: azt3328 - adjust error handling code to include debugging code snd_azf3328_dbgcallenter is called at the very beginning of the function, so it could be useful to call snd_azf3328_dbgcallleave at all exit points. Signed-off-by: Julia Lawall Signed-off-by: Takashi Iwai diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c index e4d76a2..579fc0d 100644 --- a/sound/pci/azt3328.c +++ b/sound/pci/azt3328.c @@ -2625,16 +2625,19 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) int err; snd_azf3328_dbgcallenter(); - if (dev >= SNDRV_CARDS) - return -ENODEV; + if (dev >= SNDRV_CARDS) { + err = -ENODEV; + goto out; + } if (!enable[dev]) { dev++; - return -ENOENT; + err = -ENOENT; + goto out; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) - return err; + goto out; strcpy(card->driver, "AZF3328"); strcpy(card->shortname, "Aztech AZF3328 (PCI168)"); -- cgit v0.10.2 From 5686c4f8250448cdbb15226aa32793df055123c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rapha=C3=ABl=20Ass=C3=A9nat?= Date: Tue, 9 Aug 2011 03:10:12 -0700 Subject: am3505/3517: Various platform defines for UART4 Add missing definitions for the AM3505/3517 UART4 such as DMAs, INTs and base address. 
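For context, base-address, IRQ and DMA defines like these are typically consumed by a platform resource table. A minimal hypothetical sketch follows; the resource ordering and the 1 KiB register window are assumptions for illustration, not the actual omap2 serial init code.

    #include <linux/ioport.h>

    static struct resource am35xx_uart4_resources[] = {
            {
                    .start  = AM35XX_UART4_BASE,
                    .end    = AM35XX_UART4_BASE + 0x3ff, /* assumed 1 KiB window */
                    .flags  = IORESOURCE_MEM,
            },
            {
                    .start  = INT_35XX_UART4,
                    .flags  = IORESOURCE_IRQ,
            },
            {
                    .start  = AM35XX_DMA_UART4_TX,
                    .flags  = IORESOURCE_DMA,
            },
            {
                    .start  = AM35XX_DMA_UART4_RX,
                    .flags  = IORESOURCE_DMA,
            },
    };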
Signed-off-by: Raphael Assenat Signed-off-by: Tony Lindgren diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h index d1c916f..dc562a5 100644 --- a/arch/arm/plat-omap/include/plat/dma.h +++ b/arch/arm/plat-omap/include/plat/dma.h @@ -195,6 +195,11 @@ #define OMAP36XX_DMA_UART4_TX 81 /* S_DMA_80 */ #define OMAP36XX_DMA_UART4_RX 82 /* S_DMA_81 */ + +/* Only for AM35xx */ +#define AM35XX_DMA_UART4_TX 54 +#define AM35XX_DMA_UART4_RX 55 + /*----------------------------------------------------------------------------*/ #define OMAP1_DMA_TOUT_IRQ (1 << 0) diff --git a/arch/arm/plat-omap/include/plat/irqs.h b/arch/arm/plat-omap/include/plat/irqs.h index 926d25c..30e1071 100644 --- a/arch/arm/plat-omap/include/plat/irqs.h +++ b/arch/arm/plat-omap/include/plat/irqs.h @@ -357,6 +357,7 @@ #define INT_35XX_EMAC_C0_TX_PULSE_IRQ 69 #define INT_35XX_EMAC_C0_MISC_PULSE_IRQ 70 #define INT_35XX_USBOTG_IRQ 71 +#define INT_35XX_UART4 84 #define INT_35XX_CCDC_VD0_IRQ 88 #define INT_35XX_CCDC_VD1_IRQ 92 #define INT_35XX_CCDC_VD2_IRQ 93 diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h index 2723f91..de3b10c 100644 --- a/arch/arm/plat-omap/include/plat/serial.h +++ b/arch/arm/plat-omap/include/plat/serial.h @@ -56,6 +56,9 @@ #define TI816X_UART2_BASE 0x48022000 #define TI816X_UART3_BASE 0x48024000 +/* AM3505/3517 UART4 */ +#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */ + /* External port on Zoom2/3 */ #define ZOOM_UART_BASE 0x10000000 #define ZOOM_UART_VIRT 0xfa400000 -- cgit v0.10.2 From 1d08fd9f6a7f2541a7b28a21fc638c4640d9cabb Mon Sep 17 00:00:00 2001 From: Oleg Drokin Date: Tue, 9 Aug 2011 03:10:22 -0700 Subject: Update Nook Color machine 3284 to common Encore name Machine database already updated: http://www.arm.linux.org.uk/developer/machines/list.php?id=3284 Signed-off-by: Oleg Drokin Acked-by: Russell King Signed-off-by: Tony Lindgren diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 3b3776d..fff68d0 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -910,7 +910,7 @@ omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280 uemd MACH_UEMD UEMD 3281 ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282 rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283 -nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284 +encore MACH_ENCORE ENCORE 3284 hkdkc100 MACH_HKDKC100 HKDKC100 3285 ts42xx MACH_TS42XX TS42XX 3286 aebl MACH_AEBL AEBL 3287 -- cgit v0.10.2 From ae65eb729dd3d3309fb3b7a1badc7b67cada2357 Mon Sep 17 00:00:00 2001 From: Maxin John Date: Mon, 8 Aug 2011 10:15:46 +0000 Subject: arch:arm:plat-omap:iovmm: remove unused variable 'va' The pointer "va" returned from "phys_to_virt(pa)" is never used in "sgtable_fill_kmalloc()". So, it is safe to remove this set-but-unused variable. Signed-off-by: Maxin B.
John Signed-off-by: Tony Lindgren diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c index c60737c..79e7fed 100644 --- a/arch/arm/plat-omap/iovmm.c +++ b/arch/arm/plat-omap/iovmm.c @@ -423,9 +423,6 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da, { unsigned int i; struct scatterlist *sg; - void *va; - - va = phys_to_virt(pa); for_each_sg(sgt->sgl, sg, sgt->nents, i) { unsigned bytes; -- cgit v0.10.2 From dccb3b0eb6924b54a8f30672979fdc28e164d997 Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Sat, 6 Aug 2011 09:29:10 +0000 Subject: arm: mach-omap2: mux: use kstrdup() Use kstrdup rather than duplicating its implementation The semantic patch that makes this output is available in scripts/coccinelle/api/kstrdup.cocci. More information about semantic patching is available at http://coccinelle.lip6.fr/ Signed-off-by: Thomas Meyer Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index c7fb22a..655e948 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c @@ -821,11 +821,10 @@ static void __init omap_mux_set_cmdline_signals(void) if (!omap_mux_options) return; - options = kmalloc(strlen(omap_mux_options) + 1, GFP_KERNEL); + options = kstrdup(omap_mux_options, GFP_KERNEL); if (!options) return; - strcpy(options, omap_mux_options); next_opt = options; while ((token = strsep(&next_opt, ",")) != NULL) { @@ -855,24 +854,19 @@ static int __init omap_mux_copy_names(struct omap_mux *src, for (i = 0; i < OMAP_MUX_NR_MODES; i++) { if (src->muxnames[i]) { - dst->muxnames[i] = - kmalloc(strlen(src->muxnames[i]) + 1, - GFP_KERNEL); + dst->muxnames[i] = kstrdup(src->muxnames[i], + GFP_KERNEL); if (!dst->muxnames[i]) goto free; - strcpy(dst->muxnames[i], src->muxnames[i]); } } #ifdef CONFIG_DEBUG_FS for (i = 0; i < OMAP_MUX_NR_SIDES; i++) { if (src->balls[i]) { - dst->balls[i] = - kmalloc(strlen(src->balls[i]) + 1, - GFP_KERNEL); + dst->balls[i] = kstrdup(src->balls[i], GFP_KERNEL); if (!dst->balls[i]) goto free; - strcpy(dst->balls[i], src->balls[i]); } } #endif -- cgit v0.10.2 From 133e6b55b1e8cf48418b4aa44aa7441d4cce86a0 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 9 Aug 2011 16:28:25 +0000 Subject: OMAP3: am3517crane: remove NULL board_mux from board file Since 7203f8a48bb63015ebe58a6f2a38aec1cb208b9d (arm: mach-omap2: remove NULL board_mux from board files) NULL board_mux is defined in mux.h. Signed-off-by: Johan Hovold Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c index 5f2b55f..933e935 100644 --- a/arch/arm/mach-omap2/board-am3517crane.c +++ b/arch/arm/mach-omap2/board-am3517crane.c @@ -45,8 +45,6 @@ static struct omap_board_config_kernel am3517_crane_config[] __initdata = { static struct omap_board_mux board_mux[] __initdata = { { .reg_offset = OMAP_MUX_TERMINATOR }, }; -#else -#define board_mux NULL #endif static void __init am3517_crane_init_early(void) -- cgit v0.10.2 From e9d0b97eef235eccc3df9ddb9895b35c53a8aaa2 Mon Sep 17 00:00:00 2001 From: Hemant Pedanekar Date: Wed, 10 Aug 2011 13:19:35 +0000 Subject: omap: timer: Set dmtimer used as clocksource in autoreload mode If CONFIG_OMAP_32K_TIMER is not selected and dmtimer is used as clocksource, the timer stops counting once overflow occurs as it was not set in autoreload mode. This results into timekeeping failure: for example, 'sleep 1' at the shell after the timer counter overflow would hang. 
This patch sets up autoreload when starting the clocksource timer which fixes the above issue. Signed-off-by: Hemant Pedanekar Signed-off-by: Tony Lindgren diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index e964072..cf1de7d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c @@ -293,7 +293,8 @@ static void __init omap2_gp_clocksource_init(int gptimer_id, pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n", gptimer_id, clksrc.rate); - __omap_dm_timer_load_start(clksrc.io_base, OMAP_TIMER_CTRL_ST, 0, 1); + __omap_dm_timer_load_start(clksrc.io_base, + OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1); init_sched_clock(&cd, dmtimer_update_sched_clock, 32, clksrc.rate); if (clocksource_register_hz(&clocksource_gpt, clksrc.rate)) -- cgit v0.10.2 From 56c07271307b4a20802005692b2b70dfe13d72e8 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 9 Aug 2011 04:20:48 +0000 Subject: net: add Documentation/networking/scaling.txt Describes RSS, RPS, RFS, accelerated RFS, and XPS. This version incorporates comments by Randy Dunlap and Rick Jones. Besides text cleanup, it adds an explicit "Suggested Configuration" heading to each section. Signed-off-by: Willem de Bruijn Acked-By: Rick Jones Signed-off-by: David S. Miller diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt new file mode 100644 index 0000000..7254b4b --- /dev/null +++ b/Documentation/networking/scaling.txt @@ -0,0 +1,371 @@ +Scaling in the Linux Networking Stack + + +Introduction +============ + +This document describes a set of complementary techniques in the Linux +networking stack to increase parallelism and improve performance for +multi-processor systems. + +The following technologies are described: + + RSS: Receive Side Scaling + RPS: Receive Packet Steering + RFS: Receive Flow Steering + Accelerated Receive Flow Steering + XPS: Transmit Packet Steering + + +RSS: Receive Side Scaling +========================= + +Contemporary NICs support multiple receive and transmit descriptor queues +(multi-queue). On reception, a NIC can send different packets to different +queues to distribute processing among CPUs. The NIC distributes packets by +applying a filter to each packet that assigns it to one of a small number +of logical flows. Packets for each flow are steered to a separate receive +queue, which in turn can be processed by separate CPUs. This mechanism is +generally known as “Receive-side Scaling” (RSS). The goal of RSS and +the other scaling techniques is to increase performance uniformly. +Multi-queue distribution can also be used for traffic prioritization, but +that is not the focus of these techniques. + +The filter used in RSS is typically a hash function over the network +and/or transport layer headers-- for example, a 4-tuple hash over +IP addresses and TCP ports of a packet. The most common hardware +implementation of RSS uses a 128-entry indirection table where each entry +stores a queue number. The receive queue for a packet is determined +by masking out the low order seven bits of the computed hash for the +packet (usually a Toeplitz hash), taking this number as a key into the +indirection table and reading the corresponding value. + +Some advanced NICs allow steering packets to queues based on +programmable filters. For example, webserver bound TCP port 80 packets +can be directed to their own receive queue. Such “n-tuple” filters can +be configured from ethtool (--config-ntuple).
+ +==== RSS Configuration + +The driver for a multi-queue capable NIC typically provides a kernel +module parameter for specifying the number of hardware queues to +configure. In the bnx2x driver, for instance, this parameter is called +num_queues. A typical RSS configuration would be to have one receive queue +for each CPU if the device supports enough queues, or otherwise at least +one for each cache domain at a particular cache level (L1, L2, etc.). + +The indirection table of an RSS device, which resolves a queue by masked +hash, is usually programmed by the driver at initialization. The +default mapping is to distribute the queues evenly in the table, but the +indirection table can be retrieved and modified at runtime using ethtool +commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the +indirection table could be done to give different queues different +relative weights. + +== RSS IRQ Configuration + +Each receive queue has a separate IRQ associated with it. The NIC triggers +this to notify a CPU when new packets arrive on the given queue. The +signaling path for PCIe devices uses message signaled interrupts (MSI-X), +that can route each interrupt to a particular CPU. The active mapping +of queues to IRQs can be determined from /proc/interrupts. By default, +an IRQ may be handled on any CPU. Because a non-negligible part of packet +processing takes place in receive interrupt handling, it is advantageous +to spread receive interrupts between CPUs. To manually adjust the IRQ +affinity of each interrupt see Documentation/IRQ-affinity. Some systems +will be running irqbalance, a daemon that dynamically optimizes IRQ +assignments and as a result may override any manual settings. + +== Suggested Configuration + +RSS should be enabled when latency is a concern or whenever receive +interrupt processing forms a bottleneck. Spreading load between CPUs +decreases queue length. For low latency networking, the optimal setting +is to allocate as many queues as there are CPUs in the system (or the +NIC maximum, if lower). Because the aggregate number of interrupts grows +with each additional queue, the most efficient high-rate configuration +is likely the one with the smallest number of receive queues where no +CPU that processes receive interrupts reaches 100% utilization. Per-cpu +load can be observed using the mpstat utility. + + +RPS: Receive Packet Steering +============================ + +Receive Packet Steering (RPS) is logically a software implementation of +RSS. Being in software, it is necessarily called later in the datapath. +Whereas RSS selects the queue and hence CPU that will run the hardware +interrupt handler, RPS selects the CPU to perform protocol processing +above the interrupt handler. This is accomplished by placing the packet +on the desired CPU’s backlog queue and waking up the CPU for processing. +RPS has some advantages over RSS: 1) it can be used with any NIC, +2) software filters can easily be added to hash over new protocols, +3) it does not increase hardware device interrupt rate (although it does +introduce inter-processor interrupts (IPIs)). + +RPS is called during bottom half of the receive interrupt handler, when +a driver sends a packet up the network stack with netif_rx() or +netif_receive_skb(). These call the get_rps_cpu() function, which +selects the queue that should process a packet. 
+ +The first step in determining the target CPU for RPS is to calculate a +flow hash over the packet’s addresses or ports (2-tuple or 4-tuple hash +depending on the protocol). This serves as a consistent hash of the +associated flow of the packet. The hash is either provided by hardware +or will be computed in the stack. Capable hardware can pass the hash in +the receive descriptor for the packet; this would usually be the same +hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in +skb->rx_hash and can be used elsewhere in the stack as a hash of the +packet’s flow. + +Each receive hardware queue has an associated list of CPUs to which +RPS may enqueue packets for processing. For each received packet, +an index into the list is computed from the flow hash modulo the size +of the list. The indexed CPU is the target for processing the packet, +and the packet is queued to the tail of that CPU’s backlog queue. At +the end of the bottom half routine, IPIs are sent to any CPUs for which +packets have been queued to their backlog queue. The IPI wakes backlog +processing on the remote CPU, and any queued packets are then processed +up the networking stack. + +==== RPS Configuration + +RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on +by default for SMP). Even when compiled in, RPS remains disabled until +explicitly configured. The list of CPUs to which RPS may forward traffic +can be configured for each receive queue using a sysfs file entry: + + /sys/class/net//queues/rx-/rps_cpus + +This file implements a bitmap of CPUs. RPS is disabled when it is zero +(the default), in which case packets are processed on the interrupting +CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to +the bitmap. + +== Suggested Configuration + +For a single queue device, a typical RPS configuration would be to set +the rps_cpus to the CPUs in the same cache domain of the interrupting +CPU. If NUMA locality is not an issue, this could also be all CPUs in +the system. At high interrupt rate, it might be wise to exclude the +interrupting CPU from the map since that already performs much work. + +For a multi-queue system, if RSS is configured so that a hardware +receive queue is mapped to each CPU, then RPS is probably redundant +and unnecessary. If there are fewer hardware queues than CPUs, then +RPS might be beneficial if the rps_cpus for each queue are the ones that +share the same cache domain as the interrupting CPU for that queue. + + +RFS: Receive Flow Steering +========================== + +While RPS steers packets solely based on hash, and thus generally +provides good load distribution, it does not take into account +application locality. This is accomplished by Receive Flow Steering +(RFS). The goal of RFS is to increase datacache hitrate by steering +kernel processing of packets to the CPU where the application thread +consuming the packet is running. RFS relies on the same RPS mechanisms +to enqueue packets onto the backlog of another CPU and to wake up that +CPU. + +In RFS, packets are not forwarded directly by the value of their hash, +but the hash is used as index into a flow lookup table. This table maps +flows to the CPUs where those flows are being processed. The flow hash +(see RPS section above) is used to calculate the index into this table. +The CPU recorded in each entry is the one which last processed the flow. +If an entry does not hold a valid CPU, then packets mapped to that entry +are steered using plain RPS. 
Multiple table entries may point to the +same CPU. Indeed, with many flows and few CPUs, it is very likely that +a single application thread handles flows with many different flow hashes. + +rps_sock_flow_table is a global flow table that contains the *desired* CPU +for flows: the CPU that is currently processing the flow in userspace. Each +table value is a CPU index that is updated during calls to recvmsg and +sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage() +and tcp_splice_read()). + +When the scheduler moves a thread to a new CPU while it has outstanding +receive packets on the old CPU, packets may arrive out of order. To +avoid this, RFS uses a second flow table to track outstanding packets +for each flow: rps_dev_flow_table is a table specific to each hardware +receive queue of each device. Each table value stores a CPU index and a +counter. The CPU index represents the *current* CPU onto which packets +for this flow are enqueued for further kernel processing. Ideally, kernel +and userspace processing occur on the same CPU, and hence the CPU index +in both tables is identical. This is likely false if the scheduler has +recently migrated a userspace thread while the kernel still has packets +enqueued for kernel processing on the old CPU. + +The counter in rps_dev_flow_table values records the length of the current +CPU's backlog when a packet in this flow was last enqueued. Each backlog +queue has a head counter that is incremented on dequeue. A tail counter +is computed as head counter + queue length. In other words, the counter +in rps_dev_flow_table[i] records the last element in flow i that has +been enqueued onto the currently designated CPU for flow i (of course, +entry i is actually selected by hash and multiple flows may hash to the +same entry i). + +And now the trick for avoiding out of order packets: when selecting the +CPU for packet processing (from get_rps_cpu()) the rps_sock_flow table +and the rps_dev_flow table of the queue that the packet was received on +are compared. If the desired CPU for the flow (found in the +rps_sock_flow table) matches the current CPU (found in the rps_dev_flow +table), the packet is enqueued onto that CPU’s backlog. If they differ, +the current CPU is updated to match the desired CPU if one of the +following is true: + +- The current CPU's queue head counter >= the recorded tail counter + value in rps_dev_flow[i] +- The current CPU is unset (equal to NR_CPUS) +- The current CPU is offline + +After this check, the packet is sent to the (possibly updated) current +CPU. These rules aim to ensure that a flow only moves to a new CPU when +there are no packets outstanding on the old CPU, as the outstanding +packets could arrive later than those about to be processed on the new +CPU. + +==== RFS Configuration + +RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on +by default for SMP). The functionality remains disabled until explicitly +configured. The number of entries in the global flow table is set through: + + /proc/sys/net/core/rps_sock_flow_entries + +The number of entries in the per-queue flow table is set through: + + /sys/class/net//queues/rx-/rps_flow_cnt + +== Suggested Configuration + +Both of these need to be set before RFS is enabled for a receive queue. +Values for both are rounded up to the nearest power of two. The +suggested flow count depends on the expected number of active connections +at any given time, which may be significantly less than the number of open +connections.
+
+==== RFS Configuration
+
+RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on
+by default for SMP). The functionality remains disabled until
+explicitly configured. The number of entries in the global flow table
+is set through:
+
+ /proc/sys/net/core/rps_sock_flow_entries
+
+The number of entries in the per-queue flow table is set through:
+
+ /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt
+
+== Suggested Configuration
+
+Both of these need to be set before RFS is enabled for a receive queue.
+Values for both are rounded up to the nearest power of two. The
+suggested flow count depends on the expected number of active
+connections at any given time, which may be significantly less than
+the number of open connections. We have found that a value of 32768
+for rps_sock_flow_entries works fairly well on a moderately loaded
+server.
+
+For a single queue device, the rps_flow_cnt value for the single queue
+would normally be configured to the same value as
+rps_sock_flow_entries. For a multi-queue device, the rps_flow_cnt for
+each queue might be configured as rps_sock_flow_entries / N, where N
+is the number of queues. So for instance, if rps_sock_flow_entries is
+set to 32768 and there are 16 configured receive queues, rps_flow_cnt
+for each queue might be configured as 2048.
+
+
+Accelerated RFS
+===============
+
+Accelerated RFS is to RFS what RSS is to RPS: a hardware-accelerated
+load balancing mechanism that uses soft state to steer flows based on
+where the application thread consuming the packets of each flow is
+running. Accelerated RFS should perform better than RFS since packets
+are sent directly to a CPU local to the thread consuming the data.
+The target CPU will either be the same CPU where the application runs,
+or at least a CPU which is local to the application thread’s CPU in
+the cache hierarchy.
+
+To enable accelerated RFS, the networking stack calls the
+ndo_rx_flow_steer driver function to communicate the desired hardware
+queue for packets matching a particular flow. The network stack
+automatically calls this function every time a flow entry in
+rps_dev_flow_table is updated. The driver in turn uses a device
+specific method to program the NIC to steer the packets.
+
+The hardware queue for a flow is derived from the CPU recorded in
+rps_dev_flow_table. The stack consults a CPU-to-hardware-queue map
+which is maintained by the NIC driver. This is an auto-generated
+reverse map of the IRQ affinity table shown by /proc/interrupts.
+Drivers can use functions in the cpu_rmap (“CPU affinity reverse map”)
+kernel library to populate the map; a short sketch of typical driver
+usage appears at the end of this section. For each CPU, the
+corresponding queue in the map is set to be one whose processing CPU
+is closest in cache locality.
+
+==== Accelerated RFS Configuration
+
+Accelerated RFS is only available if the kernel is compiled with
+CONFIG_RFS_ACCEL and support is provided by the NIC device and driver.
+It also requires that ntuple filtering is enabled via ethtool. The map
+of CPUs to queues is automatically deduced from the IRQ affinities
+configured for each receive queue by the driver, so no additional
+configuration should be necessary.
+
+== Suggested Configuration
+
+This technique should be enabled whenever one wants to use RFS and the
+NIC supports hardware acceleration.
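+
+For NIC driver authors, populating the reverse map typically looks
+something like the sketch below. The netdev, queue and IRQ variables
+are hypothetical; only the cpu_rmap calls, declared in
+<linux/cpu_rmap.h>, are actual API:
+
+	#include <linux/cpu_rmap.h>
+
+	/* Hedged sketch: allocate one map slot per RX queue. */
+	int i, err;
+
+	netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_rx_queues);
+	if (!netdev->rx_cpu_rmap)
+		return -ENOMEM;
+
+	for (i = 0; i < num_rx_queues; i++) {
+		/* Associate each RX completion IRQ with the map. */
+		err = irq_cpu_rmap_add(netdev->rx_cpu_rmap, rx_irq[i]);
+		if (err) {
+			free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+			netdev->rx_cpu_rmap = NULL;
+			return err;
+		}
+	}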
+
+XPS: Transmit Packet Steering
+=============================
+
+Transmit Packet Steering is a mechanism for intelligently selecting
+which transmit queue to use when transmitting a packet on a
+multi-queue device. To accomplish this, a mapping from CPU to
+hardware queue(s) is recorded. The goal of this mapping is usually to
+assign queues exclusively to a subset of CPUs, where the transmit
+completions for these queues are processed on a CPU within this set.
+This choice provides two benefits. First, contention on the device
+queue lock is significantly reduced since fewer CPUs contend for the
+same queue (contention can be eliminated completely if each CPU has
+its own transmit queue). Secondly, cache miss rate on transmit
+completion is reduced, in particular for data cache lines that hold
+the sk_buff structures.
+
+XPS is configured per transmit queue by setting a bitmap of CPUs that
+may use that queue to transmit. The reverse mapping, from CPUs to
+transmit queues, is computed and maintained for each network device.
+When transmitting the first packet in a flow, the function
+get_xps_queue() is called to select a queue. This function uses the ID
+of the running CPU as a key into the CPU-to-queue lookup table. If the
+ID matches a single queue, that is used for transmission. If multiple
+queues match, one is selected by using the flow hash to compute an
+index into the set.
+
+The queue chosen for transmitting a particular flow is saved in the
+corresponding socket structure for the flow (e.g. a TCP connection).
+This transmit queue is used for subsequent packets sent on the flow to
+prevent out of order (ooo) packets. The choice also amortizes the cost
+of calling get_xps_queue() over all packets in the connection. To
+avoid ooo packets, the queue for a flow can subsequently only be
+changed if skb->ooo_okay is set for a packet in the flow. This flag
+indicates that there are no outstanding packets in the flow, so the
+transmit queue can change without the risk of generating out of order
+packets. The transport layer is responsible for setting ooo_okay
+appropriately. TCP, for instance, sets the flag when all data for a
+connection has been acknowledged.
+
+==== XPS Configuration
+
+XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on
+by default for SMP). The functionality remains disabled until
+explicitly configured. To enable XPS, the bitmap of CPUs that may use
+a transmit queue is configured using the sysfs file entry:
+
+/sys/class/net/<dev>/queues/tx-<n>/xps_cpus
+
+== Suggested Configuration
+
+For a network device with a single transmission queue, XPS
+configuration has no effect, since there is no choice in this case.
+In a multi-queue system, XPS is preferably configured so that each CPU
+maps onto one queue. If there are as many queues as there are CPUs in
+the system, then each queue can also map onto one CPU, resulting in
+exclusive pairings that experience no contention. If there are fewer
+queues than CPUs, then the best CPUs to share a given queue are
+probably those that share the cache with the CPU that processes
+transmit completions for that queue (transmit interrupts).
+
+
+Further Information
+===================
+RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated
+into 2.6.38. Original patches were submitted by Tom Herbert
+(therbert@google.com).
+
+Accelerated RFS was introduced in 2.6.39. Original patches were
+submitted by Ben Hutchings (bhutchings@solarflare.com).
+
+Authors:
+Tom Herbert (therbert@google.com)
+Willem de Bruijn (willemb@google.com)
-- cgit v0.10.2


From a85fe3fce84335f83be17a7659bfbb3a71dc2fc4 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 11 Aug 2011 01:15:44 +1000
Subject: powerpc: Really fix build without CONFIG_PCI

Brown paper bag day, previous commit wouldn't work very well with
modules enabled. Move the exports into the ifdef.
Signed-off-by: Benjamin Herrenschmidt diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c index faca64a..b25f632 100644 --- a/arch/powerpc/kernel/iomap.c +++ b/arch/powerpc/kernel/iomap.c @@ -144,7 +144,7 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr) return; iounmap(addr); } -#endif /* CONFIG_PCI */ EXPORT_SYMBOL(pci_iomap); EXPORT_SYMBOL(pci_iounmap); +#endif /* CONFIG_PCI */ -- cgit v0.10.2 From fc8ed7be738ffb1b3b0140ed2de6def38b9a7101 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 10 Aug 2011 12:42:26 -0300 Subject: perf top browser: Remove spurious helpline update It will be immediately replaced in perf_top_browser__run. Cc: David Ahern Cc: Frederic Weisbecker Cc: Mike Galbraith Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lkml.kernel.org/n/tip-q7e2jzb44elqpkvdllk94x0i@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c index 5a06538..88403cf 100644 --- a/tools/perf/util/ui/browsers/top.c +++ b/tools/perf/util/ui/browsers/top.c @@ -208,6 +208,5 @@ int perf_top__tui_browser(struct perf_top *top) }, }; - ui_helpline__push("Press <- or ESC to exit"); return perf_top_browser__run(&browser); } -- cgit v0.10.2 From ac9cf9ff4f4ffa355d0e93c2bd6d74961a16efad Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 5 Aug 2011 12:24:44 +0200 Subject: mach-sa1100: fix PCI build problem The PCI nanoengine driver in the SA1100 machine probably has not been building for some time. It probably dragged hardware.h in implicitly and now it doesn't anymore. After this an SA1100 build selecting all system variants will build successfully. Signed-off-by: Linus Walleij diff --git a/arch/arm/mach-sa1100/pci-nanoengine.c b/arch/arm/mach-sa1100/pci-nanoengine.c index 964c6c3..dd39fee 100644 --- a/arch/arm/mach-sa1100/pci-nanoengine.c +++ b/arch/arm/mach-sa1100/pci-nanoengine.c @@ -28,6 +28,7 @@ #include #include +#include static DEFINE_SPINLOCK(nano_lock); -- cgit v0.10.2 From af9d220bac41dc3201893e1601cc7c44f7da4498 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 10 Aug 2011 14:43:30 +0200 Subject: EDAC: Correct Kconfig dependencies Both AMD and Intel i7 EDAC drivers use MCE features and are thus dependent of this functionality present in the kernel. Express this in Kconfig so that randconfig builds don't break. 
Reported-by: Randy Dunlap Signed-off-by: Borislav Petkov Acked-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index af1a17d..c422fea 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -41,7 +41,7 @@ config EDAC_DEBUG config EDAC_DECODE_MCE tristate "Decode MCEs in human-readable form (only on AMD for now)" - depends on CPU_SUP_AMD && X86_MCE + depends on CPU_SUP_AMD && X86_MCE_AMD default y ---help--- Enable this option if you want to decode Machine Check Exceptions @@ -173,8 +173,7 @@ config EDAC_I5400 config EDAC_I7CORE tristate "Intel i7 Core (Nehalem) processors" - depends on EDAC_MM_EDAC && PCI && X86 - select EDAC_MCE + depends on EDAC_MM_EDAC && PCI && X86 && X86_MCE_INTEL help Support for error detection and correction the Intel i7 Core (Nehalem) Integrated Memory Controller that exists on -- cgit v0.10.2 From 15439bde3af7ff88459ea2b5520b77312e958df2 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Fri, 5 Aug 2011 13:49:52 +0200 Subject: ALSA: snd-usb-caiaq: Correct offset fields of outbound iso_frame_desc This fixes faulty outbount packets in case the inbound packets received from the hardware are fragmented and contain bogus input iso frames. The bug has been there for ages, but for some strange reasons, it was only triggered by newer machines in 64bit mode. Signed-off-by: Daniel Mack Reported-and-tested-by: William Light Reported-by: Pedro Ribeiro Cc: stable@kernel.org Signed-off-by: Takashi Iwai diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index d0d493c..aa52b3e 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -614,6 +614,7 @@ static void read_completed(struct urb *urb) struct snd_usb_caiaqdev *dev; struct urb *out; int frame, len, send_it = 0, outframe = 0; + size_t offset = 0; if (urb->status || !info) return; @@ -634,7 +635,8 @@ static void read_completed(struct urb *urb) len = urb->iso_frame_desc[outframe].actual_length; out->iso_frame_desc[outframe].length = len; out->iso_frame_desc[outframe].actual_length = 0; - out->iso_frame_desc[outframe].offset = BYTES_PER_FRAME * frame; + out->iso_frame_desc[outframe].offset = offset; + offset += len; if (len > 0) { spin_lock(&dev->spinlock); @@ -650,7 +652,7 @@ static void read_completed(struct urb *urb) } if (send_it) { - out->number_of_packets = FRAMES_PER_URB; + out->number_of_packets = outframe; out->transfer_flags = URB_ISO_ASAP; usb_submit_urb(out, GFP_ATOMIC); } -- cgit v0.10.2 From 059c4383550b158bc1b6d34d8ab085e81cb3d71b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 8 Aug 2011 13:18:03 +0200 Subject: drivers/net/wireless/wl1251: add missing kfree In each case, the kfree already at the end of the function is also needed in the error case. A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @exists@ local idexpression x; statement S,S1; expression E; identifier fl; expression *ptr != NULL; @@ x = \(kmalloc\|kzalloc\|kcalloc\)(...); ... if (x == NULL) S <... when != x when != if (...) { <+...kfree(x)...+> } when any when != true x == NULL x->fl ...> ( if (x == NULL) S1 | if (...) { ... when != x when forall ( return \(0\|<+...x...+>\|ptr\); | * return ...; ) } ) // Signed-off-by: Julia Lawall Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c index ef8370e..ad87a1a 100644 --- a/drivers/net/wireless/wl1251/acx.c +++ b/drivers/net/wireless/wl1251/acx.c @@ -140,8 +140,6 @@ int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth) auth->sleep_auth = sleep_auth; ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); - if (ret < 0) - return ret; out: kfree(auth); @@ -681,10 +679,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl) ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD, detection, sizeof(*detection)); - if (ret < 0) { + if (ret < 0) wl1251_warning("failed to set cca threshold: %d", ret); - return ret; - } out: kfree(detection); diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c index 81f164b..d14d69d 100644 --- a/drivers/net/wireless/wl1251/cmd.c +++ b/drivers/net/wireless/wl1251/cmd.c @@ -241,7 +241,7 @@ int wl1251_cmd_data_path(struct wl1251 *wl, u8 channel, bool enable) if (ret < 0) { wl1251_error("tx %s cmd for channel %d failed", enable ? "start" : "stop", channel); - return ret; + goto out; } wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d", -- cgit v0.10.2 From 30eefc95841ce51c3281876f0b954dd1d3c0bd5f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 10 Aug 2011 11:22:42 -0700 Subject: xen: xen-selfballoon.c needs more header files Fix build errors (found when CONFIG_SYSFS is not enabled): drivers/xen/xen-selfballoon.c:446: warning: data definition has no type or storage class drivers/xen/xen-selfballoon.c:446: warning: type defaults to 'int' in declaration of 'EXPORT_SYMBOL' drivers/xen/xen-selfballoon.c:446: warning: parameter names (without types) in function declaration drivers/xen/xen-selfballoon.c:485: error: expected declaration specifiers or '...' before string constant drivers/xen/xen-selfballoon.c:485: warning: data definition has no type or storage class drivers/xen/xen-selfballoon.c:485: warning: type defaults to 'int' in declaration of 'MODULE_LICENSE' drivers/xen/xen-selfballoon.c:485: warning: function declaration isn't a prototype Signed-off-by: Randy Dunlap Signed-off-by: Konrad Rzeszutek Wilk diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 1b4afd8..6ea852e 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c @@ -70,6 +70,7 @@ #include #include #include +#include #include #include #include -- cgit v0.10.2 From 9e978d8f7db1c5de7cdc6450a8ca208db3b95f84 Mon Sep 17 00:00:00 2001 From: Ajeet Yadav Date: Fri, 29 Jul 2011 07:42:59 +0000 Subject: "xfs: fix error handling for synchronous writes" revisited xfs: fix for hang during synchronous buffer write error If removed storage while synchronous buffer write underway, "xfslogd" hangs. Detailed log http://oss.sgi.com/archives/xfs/2011-07/msg00740.html Related work bfc60177f8ab509bc225becbb58f7e53a0e33e81 "xfs: fix error handling for synchronous writes" Given that xfs_bwrite actually does the shutdown already after waiting for the b_iodone completion and given that we actually found that calling xfs_force_shutdown from inside xfs_buf_iodone_callbacks was a major contributor the problem it better to drop this call. 
Signed-off-by: Ajeet Yadav Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 0402173..cac2ecf 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -1010,7 +1010,6 @@ xfs_buf_iodone_callbacks( XFS_BUF_UNDELAYWRITE(bp); trace_xfs_buf_error_relse(bp, _RET_IP_); - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); do_callbacks: xfs_buf_do_callbacks(bp); -- cgit v0.10.2 From f3fb5b7bb70d6e679c15fef85707810a067f5fb6 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2011 11:15:30 -0400 Subject: x86: Remove unnecessary compile flag tweaks for vsyscall code As of commit 98d0ac38ca7b1b7a552c9a2359174ff84decb600 Author: Andy Lutomirski Date: Thu Jul 14 06:47:22 2011 -0400 x86-64: Move vread_tsc and vread_hpet into the vDSO user code no longer directly calls into code in arch/x86/kernel/, so we don't need compile flag hacks to make it safe. All vdso code is in the vdso directory now. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/835cd05a4c7740544d09723d6ba48f4406f9826c.1312988155.git.luto@mit.edu Signed-off-by: H. Peter Anvin diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 2deef3d..3d1ac39 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -17,19 +17,6 @@ CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg endif -# -# vsyscalls (which work on the user stack) should have -# no stack-protector checks: -# -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) -CFLAGS_hpet.o := $(nostackp) -CFLAGS_paravirt.o := $(nostackp) -GCOV_PROFILE_vsyscall_64.o := n -GCOV_PROFILE_hpet.o := n -GCOV_PROFILE_tsc.o := n -GCOV_PROFILE_paravirt.o := n - obj-y := process_$(BITS).o signal.o entry_$(BITS).o obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o obj-y += time.o ioport.o ldt.o dumpstack.o diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 93a0d46..bf8e9ff 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -18,9 +18,6 @@ * use the vDSO. */ -/* Disable profiling for userspace code: */ -#define DISABLE_BRANCH_PROFILING - #include #include #include -- cgit v0.10.2 From fce8dc06423d6fb2709469dc5c55b04e09c1d126 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2011 11:15:31 -0400 Subject: x86-64: Wire up getcpu syscall getcpu is available as a vdso entry and an emulated vsyscall. Programs that for some reason don't want to use the vdso should still be able to call getcpu without relying on the slow emulated vsyscall. It costs almost nothing to expose it as a real syscall. We also need this for the following patch in vsyscall=native mode. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/6b19f55bdb06a0c32c2fa6dba9b6f222e1fde999.1312988155.git.luto@mit.edu Signed-off-by: H. 
Peter Anvin diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 705bf13..d92641c 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h @@ -681,6 +681,8 @@ __SYSCALL(__NR_syncfs, sys_syncfs) __SYSCALL(__NR_sendmmsg, sys_sendmmsg) #define __NR_setns 308 __SYSCALL(__NR_setns, sys_setns) +#define __NR_getcpu 309 +__SYSCALL(__NR_getcpu, sys_getcpu) #ifndef __NO_STUBS #define __ARCH_WANT_OLD_READDIR -- cgit v0.10.2 From 3ae36655b97a03fa1decf72f04078ef945647c1a Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 10 Aug 2011 11:15:32 -0400 Subject: x86-64: Rework vsyscall emulation and add vsyscall= parameter There are three choices: vsyscall=native: Vsyscalls are native code that issues the corresponding syscalls. vsyscall=emulate (default): Vsyscalls are emulated by instruction fault traps, tested in the bad_area path. The actual contents of the vsyscall page is the same as the vsyscall=native case except that it's marked NX. This way programs that make assumptions about what the code in the page does will not be confused when they read that code. vsyscall=none: Trying to execute a vsyscall will segfault. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/8449fb3abf89851fd6b2260972666a6f82542284.1312988155.git.luto@mit.edu Signed-off-by: H. Peter Anvin diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index aa47be7..9cfd6bb 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2657,6 +2657,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted. vmpoff= [KNL,S390] Perform z/VM CP command after power off. Format: + vsyscall= [X86-64] + Controls the behavior of vsyscalls (i.e. calls to + fixed addresses of 0xffffffffff600x00 from legacy + code). Most statically-linked binaries and older + versions of glibc use these calls. Because these + functions are at fixed addresses, they make nice + targets for exploits that can control RIP. + + emulate [default] Vsyscalls turn into traps and are + emulated reasonably safely. + + native Vsyscalls are native syscall instructions. + This is a little bit faster than trapping + and makes a few dynamic recompilers work + better than they would in emulation mode. + It also makes exploits much easier to write. + + none Vsyscalls don't work at all. This makes + them quite hard to use for exploits but + might break your system. + vt.cur_default= [VT] Default cursor shape. Format: 0xCCBBAA, where AA, BB, and CC are the same as the parameters of the [?A;B;Cc escape sequence; diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index a563c50..2c224e1 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -17,7 +17,6 @@ * Vectors 0 ... 31 : system traps and exceptions - hardcoded events * Vectors 32 ... 127 : device interrupts * Vector 128 : legacy int80 syscall interface - * Vector 204 : legacy x86_64 vsyscall emulation * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts * Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts * @@ -51,9 +50,6 @@ #ifdef CONFIG_X86_32 # define SYSCALL_VECTOR 0x80 #endif -#ifdef CONFIG_X86_64 -# define VSYSCALL_EMU_VECTOR 0xcc -#endif /* * Vectors 0x30-0x3f are used for ISA interrupts. 
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 2bae0a5..0012d09 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -40,7 +40,6 @@ asmlinkage void alignment_check(void); asmlinkage void machine_check(void); #endif /* CONFIG_X86_MCE */ asmlinkage void simd_coprocessor_error(void); -asmlinkage void emulate_vsyscall(void); dotraplinkage void do_divide_error(struct pt_regs *, long); dotraplinkage void do_debug(struct pt_regs *, long); @@ -67,7 +66,6 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long); dotraplinkage void do_machine_check(struct pt_regs *, long); #endif dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long); -dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *, long); #endif diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index 6010707..eaea1d3 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -27,6 +27,12 @@ extern struct timezone sys_tz; extern void map_vsyscall(void); +/* + * Called on instruction fetch fault in vsyscall page. + * Returns true if handled. + */ +extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address); + #endif /* __KERNEL__ */ #endif /* _ASM_X86_VSYSCALL_H */ diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e949793..46792d9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1123,7 +1123,6 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug zeroentry coprocessor_error do_coprocessor_error errorentry alignment_check do_alignment_check zeroentry simd_coprocessor_error do_simd_coprocessor_error -zeroentry emulate_vsyscall do_emulate_vsyscall /* Reload gs selector with exception handling */ diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index fbc097a..b9b6716 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -872,12 +872,6 @@ void __init trap_init(void) set_bit(SYSCALL_VECTOR, used_vectors); #endif -#ifdef CONFIG_X86_64 - BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors)); - set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall); - set_bit(VSYSCALL_EMU_VECTOR, used_vectors); -#endif - /* * Should be a barrier for any external CPU state: */ diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 8f3a265..0f703f1 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -71,7 +71,6 @@ PHDRS { text PT_LOAD FLAGS(5); /* R_E */ data PT_LOAD FLAGS(6); /* RW_ */ #ifdef CONFIG_X86_64 - user PT_LOAD FLAGS(5); /* R_E */ #ifdef CONFIG_SMP percpu PT_LOAD FLAGS(6); /* RW_ */ #endif @@ -174,38 +173,6 @@ SECTIONS . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); -#define VSYSCALL_ADDR (-10*1024*1024) - -#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET) -#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) - -#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) -#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) - - __vsyscall_0 = .; - - . = VSYSCALL_ADDR; - .vsyscall : AT(VLOAD(.vsyscall)) { - /* work around gold bug 13023 */ - __vsyscall_beginning_hack = .; - *(.vsyscall_0) - - . = __vsyscall_beginning_hack + 1024; - *(.vsyscall_1) - - . = __vsyscall_beginning_hack + 2048; - *(.vsyscall_2) - - . = __vsyscall_beginning_hack + 4096; /* Pad the whole page. */ - } :user =0xcc - . 
= ALIGN(__vsyscall_0 + PAGE_SIZE, PAGE_SIZE); - -#undef VSYSCALL_ADDR -#undef VLOAD_OFFSET -#undef VLOAD -#undef VVIRT_OFFSET -#undef VVIRT - #endif /* CONFIG_X86_64 */ /* Init code and data - will be freed after init */ diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index bf8e9ff..18ae83d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c @@ -56,6 +56,27 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), }; +static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; + +static int __init vsyscall_setup(char *str) +{ + if (str) { + if (!strcmp("emulate", str)) + vsyscall_mode = EMULATE; + else if (!strcmp("native", str)) + vsyscall_mode = NATIVE; + else if (!strcmp("none", str)) + vsyscall_mode = NONE; + else + return -EINVAL; + + return 0; + } + + return -EINVAL; +} +early_param("vsyscall", vsyscall_setup); + void update_vsyscall_tz(void) { unsigned long flags; @@ -100,7 +121,7 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", level, tsk->comm, task_pid_nr(tsk), - message, regs->ip - 2, regs->cs, + message, regs->ip, regs->cs, regs->sp, regs->ax, regs->si, regs->di); } @@ -118,45 +139,39 @@ static int addr_to_vsyscall_nr(unsigned long addr) return nr; } -void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code) +bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) { struct task_struct *tsk; unsigned long caller; int vsyscall_nr; long ret; - local_irq_enable(); + /* + * No point in checking CS -- the only way to get here is a user mode + * trap to a high address, which means that we're in 64-bit user code. + */ - if (!user_64bit_mode(regs)) { - /* - * If we trapped from kernel mode, we might as well OOPS now - * instead of returning to some random address and OOPSing - * then. - */ - BUG_ON(!user_mode(regs)); + WARN_ON_ONCE(address != regs->ip); - /* Compat mode and non-compat 32-bit CS should both segfault. */ - warn_bad_vsyscall(KERN_WARNING, regs, - "illegal int 0xcc from 32-bit mode"); - goto sigsegv; + if (vsyscall_mode == NONE) { + warn_bad_vsyscall(KERN_INFO, regs, + "vsyscall attempted with vsyscall=none"); + return false; } - /* - * x86-ism here: regs->ip points to the instruction after the int 0xcc, - * and int 0xcc is two bytes long. - */ - vsyscall_nr = addr_to_vsyscall_nr(regs->ip - 2); + vsyscall_nr = addr_to_vsyscall_nr(address); trace_emulate_vsyscall(vsyscall_nr); if (vsyscall_nr < 0) { warn_bad_vsyscall(KERN_WARNING, regs, - "illegal int 0xcc (exploit attempt?)"); + "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround"); goto sigsegv; } if (get_user(caller, (unsigned long __user *)regs->sp) != 0) { - warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)"); + warn_bad_vsyscall(KERN_WARNING, regs, + "vsyscall with bad stack (exploit attempt?)"); goto sigsegv; } @@ -201,13 +216,11 @@ void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code) regs->ip = caller; regs->sp += 8; - local_irq_disable(); - return; + return true; sigsegv: - regs->ip -= 2; /* The faulting instruction should be the int 0xcc. 
*/ force_sig(SIGSEGV, current); - local_irq_disable(); + return true; } /* @@ -255,15 +268,21 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) void __init map_vsyscall(void) { - extern char __vsyscall_0; - unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0); + extern char __vsyscall_page; + unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); extern char __vvar_page; unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page); - /* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */ - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL); + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, + vsyscall_mode == NATIVE + ? PAGE_KERNEL_VSYSCALL + : PAGE_KERNEL_VVAR); + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) != + (unsigned long)VSYSCALL_START); + __set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR); - BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != (unsigned long)VVAR_ADDRESS); + BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) != + (unsigned long)VVAR_ADDRESS); } static int __init vsyscall_init(void) diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S index ffa845e..c9596a9 100644 --- a/arch/x86/kernel/vsyscall_emu_64.S +++ b/arch/x86/kernel/vsyscall_emu_64.S @@ -7,21 +7,31 @@ */ #include + #include +#include +#include + +__PAGE_ALIGNED_DATA + .globl __vsyscall_page + .balign PAGE_SIZE, 0xcc + .type __vsyscall_page, @object +__vsyscall_page: + + mov $__NR_gettimeofday, %rax + syscall + ret -/* The unused parts of the page are filled with 0xcc by the linker script. */ + .balign 1024, 0xcc + mov $__NR_time, %rax + syscall + ret -.section .vsyscall_0, "a" -ENTRY(vsyscall_0) - int $VSYSCALL_EMU_VECTOR -END(vsyscall_0) + .balign 1024, 0xcc + mov $__NR_getcpu, %rax + syscall + ret -.section .vsyscall_1, "a" -ENTRY(vsyscall_1) - int $VSYSCALL_EMU_VECTOR -END(vsyscall_1) + .balign 4096, 0xcc -.section .vsyscall_2, "a" -ENTRY(vsyscall_2) - int $VSYSCALL_EMU_VECTOR -END(vsyscall_2) + .size __vsyscall_page, 4096 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c1d0182..e58935c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -720,6 +720,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, if (is_errata100(regs, address)) return; +#ifdef CONFIG_X86_64 + /* + * Instruction fetch faults in the vsyscall page might need + * emulation. + */ + if (unlikely((error_code & PF_INSTR) && + ((address & ~0xfff) == VSYSCALL_START))) { + if (emulate_vsyscall(regs, address)) + return; + } +#endif + if (unlikely(show_unhandled_signals)) show_signal_msg(regs, error_code, address, tsk); -- cgit v0.10.2 From 280ec8b718e8565333ace339d6bba91239440b20 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 10 Aug 2011 22:19:19 +0900 Subject: ASoC: Add missing break in WM8994 probe This error would have no effect on current silicon revisions, the fall through case has the same behaviour. 
Signed-off-by: Mark Brown Acked-by: Liam Girdwood diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 09e680a..b393f9f 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -2981,6 +2981,7 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) wm8994->hubs.dcs_readback_mode = 1; break; } + break; case WM8958: wm8994->hubs.dcs_readback_mode = 1; -- cgit v0.10.2 From 392ba787bcc9b8d4786fb94949d270ec7414da55 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Thu, 11 Aug 2011 09:56:06 +0800 Subject: ARM: pxa: fix logic error in PJ4 iWMMXt handling This got added in: commit ef6c84454f8567d4968c210d7d194fb711ed3739 Author: Haojian Zhuang Date: Wed Nov 24 11:54:25 2010 +0800 ARM: pxa: add iwmmx support for PJ4 which does: - mrc p15, 0, r2, c15, c1, 0 - orr r2, r2, #0x3 @ enable access to CP0 and CP1 - mcr p15, 0, r2, c15, c1, 0 + @ enable access to CP0 and CP1 + XSC(mrc p15, 0, r2, c15, c1, 0) + XSC(orr r2, r2, #0x3) + XSC(mcr p15, 0, r2, c15, c1, 0) but then later does: - mrc p15, 0, r4, c15, c1, 0 - orr r4, r4, #0x3 @ enable access to CP0 and CP1 - mcr p15, 0, r4, c15, c1, 0 + @ enable access to CP0 and CP1 + XSC(mrc p15, 0, r4, c15, c1, 0) + XSC(orr r4, r4, #0xf) + XSC(mcr p15, 0, r4, c15, c1, 0) Signed-off-by: Lennert Buytenhek Acked-by Haojian Signed-off-by: Eric Miao diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S index 7fa3bb0..a087838 100644 --- a/arch/arm/kernel/iwmmxt.S +++ b/arch/arm/kernel/iwmmxt.S @@ -195,10 +195,10 @@ ENTRY(iwmmxt_task_disable) @ enable access to CP0 and CP1 XSC(mrc p15, 0, r4, c15, c1, 0) - XSC(orr r4, r4, #0xf) + XSC(orr r4, r4, #0x3) XSC(mcr p15, 0, r4, c15, c1, 0) PJ4(mrc p15, 0, r4, c1, c0, 2) - PJ4(orr r4, r4, #0x3) + PJ4(orr r4, r4, #0xf) PJ4(mcr p15, 0, r4, c1, c0, 2) mov r0, #0 @ nothing to load @@ -313,7 +313,7 @@ ENTRY(iwmmxt_task_switch) teq r2, r3 @ next task owns it? movne pc, lr @ no: leave Concan disabled -1: @ flip Conan access +1: @ flip Concan access XSC(eor r1, r1, #0x3) XSC(mcr p15, 0, r1, c15, c1, 0) PJ4(eor r1, r1, #0xf) -- cgit v0.10.2 From 3647a40f3677bc76f85d25bfe56f096e2a44d899 Mon Sep 17 00:00:00 2001 From: Tanmay Upadhyay Date: Thu, 14 Jul 2011 15:07:15 +0530 Subject: ARM: pxa168/gplugd: get rid of mfp-gplugd.h Move definitions from mfp-gplugd.h to mfp-pxa168.h as they aren't gplugD specific. 
Signed-off-by: Tanmay Upadhyay Signed-off-by: Eric Miao diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index c070c24..0770e51 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -16,16 +16,18 @@ #include #include #include -#include #include "common.h" static unsigned long gplugd_pin_config[] __initdata = { /* UART3 */ - GPIO8_UART3_SOUT, - GPIO9_UART3_SIN, - GPI1O_UART3_CTS, - GPI11_UART3_RTS, + GPIO8_UART3_TXD, + GPIO9_UART3_RXD, + GPIO1O_UART3_CTS, + GPIO11_UART3_RTS, + + /* USB OTG PEN */ + GPIO18_GPIO, /* MMC2 */ GPIO28_MMC2_CMD, @@ -109,6 +111,12 @@ static unsigned long gplugd_pin_config[] __initdata = { GPIO105_CI2C_SDA, GPIO106_CI2C_SCL, + /* SPI NOR Flash on SSP2 */ + GPIO107_SSP2_RXD, + GPIO108_SSP2_TXD, + GPIO110_GPIO, /* SPI_CSn */ + GPIO111_SSP2_CLK, + /* Select JTAG */ GPIO109_GPIO, diff --git a/arch/arm/mach-mmp/include/mach/mfp-gplugd.h b/arch/arm/mach-mmp/include/mach/mfp-gplugd.h deleted file mode 100644 index b8cf38d..0000000 --- a/arch/arm/mach-mmp/include/mach/mfp-gplugd.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * linux/arch/arm/mach-mmp/include/mach/mfp-gplugd.h - * - * MFP definitions used in gplugD - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#ifndef __MACH_MFP_GPLUGD_H -#define __MACH_MFP_GPLUGD_H - -#include -#include - -/* UART3 */ -#define GPIO8_UART3_SOUT MFP_CFG(GPIO8, AF2) -#define GPIO9_UART3_SIN MFP_CFG(GPIO9, AF2) -#define GPI1O_UART3_CTS MFP_CFG(GPIO10, AF2) -#define GPI11_UART3_RTS MFP_CFG(GPIO11, AF2) - -/* MMC2 */ -#define GPIO28_MMC2_CMD MFP_CFG_DRV(GPIO28, AF6, FAST) -#define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST) -#define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST) -#define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST) -#define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST) -#define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST) - -/* I2S */ -#undef GPIO114_I2S_FRM -#undef GPIO115_I2S_BCLK - -#define GPIO114_I2S_FRM MFP_CFG_DRV(GPIO114, AF1, FAST) -#define GPIO115_I2S_BCLK MFP_CFG_DRV(GPIO115, AF1, FAST) -#define GPIO116_I2S_TXD MFP_CFG_DRV(GPIO116, AF1, FAST) - -/* MMC4 */ -#define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST) -#define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST) -#define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST) -#define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST) -#define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST) -#define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST) - -/* OTG GPIO */ -#define GPIO_USB_OTG_PEN 18 -#define GPIO_USB_OIDIR 20 - -/* Other GPIOs are 35, 84, 85 */ -#endif /* __MACH_MFP_GPLUGD_H */ diff --git a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h index 8c78232..92aaa3c 100644 --- a/arch/arm/mach-mmp/include/mach/mfp-pxa168.h +++ b/arch/arm/mach-mmp/include/mach/mfp-pxa168.h @@ -203,6 +203,10 @@ #define GPIO33_CF_nCD2 MFP_CFG(GPIO33, AF3) /* UART */ +#define GPIO8_UART3_TXD MFP_CFG(GPIO8, AF2) +#define GPIO9_UART3_RXD MFP_CFG(GPIO9, AF2) +#define GPIO1O_UART3_CTS MFP_CFG(GPIO10, AF2) +#define GPIO11_UART3_RTS MFP_CFG(GPIO11, AF2) #define GPIO88_UART2_TXD MFP_CFG(GPIO88, AF2) #define GPIO89_UART2_RXD MFP_CFG(GPIO89, AF2) #define GPIO107_UART1_TXD MFP_CFG_DRV(GPIO107, AF1, FAST) @@ -232,6 +236,22 @@ #define GPIO53_MMC1_CD MFP_CFG(GPIO53, AF1) #define GPIO46_MMC1_WP MFP_CFG(GPIO46, AF1) +/* MMC2 */ +#define GPIO28_MMC2_CMD 
MFP_CFG_DRV(GPIO28, AF6, FAST) +#define GPIO29_MMC2_CLK MFP_CFG_DRV(GPIO29, AF6, FAST) +#define GPIO30_MMC2_DAT0 MFP_CFG_DRV(GPIO30, AF6, FAST) +#define GPIO31_MMC2_DAT1 MFP_CFG_DRV(GPIO31, AF6, FAST) +#define GPIO32_MMC2_DAT2 MFP_CFG_DRV(GPIO32, AF6, FAST) +#define GPIO33_MMC2_DAT3 MFP_CFG_DRV(GPIO33, AF6, FAST) + +/* MMC4 */ +#define GPIO125_MMC4_DAT3 MFP_CFG_DRV(GPIO125, AF7, FAST) +#define GPIO126_MMC4_DAT2 MFP_CFG_DRV(GPIO126, AF7, FAST) +#define GPIO127_MMC4_DAT1 MFP_CFG_DRV(GPIO127, AF7, FAST) +#define GPIO0_2_MMC4_DAT0 MFP_CFG_DRV(GPIO0_2, AF7, FAST) +#define GPIO1_2_MMC4_CMD MFP_CFG_DRV(GPIO1_2, AF7, FAST) +#define GPIO2_2_MMC4_CLK MFP_CFG_DRV(GPIO2_2, AF7, FAST) + /* LCD */ #define GPIO84_LCD_CS MFP_CFG(GPIO84, AF1) #define GPIO60_LCD_DD0 MFP_CFG(GPIO60, AF1) @@ -269,11 +289,12 @@ #define GPIO106_CI2C_SCL MFP_CFG(GPIO106, AF1) /* I2S */ -#define GPIO113_I2S_MCLK MFP_CFG(GPIO113,AF6) -#define GPIO114_I2S_FRM MFP_CFG(GPIO114,AF1) -#define GPIO115_I2S_BCLK MFP_CFG(GPIO115,AF1) -#define GPIO116_I2S_RXD MFP_CFG(GPIO116,AF2) -#define GPIO117_I2S_TXD MFP_CFG(GPIO117,AF2) +#define GPIO113_I2S_MCLK MFP_CFG(GPIO113, AF6) +#define GPIO114_I2S_FRM MFP_CFG(GPIO114, AF1) +#define GPIO115_I2S_BCLK MFP_CFG(GPIO115, AF1) +#define GPIO116_I2S_RXD MFP_CFG(GPIO116, AF2) +#define GPIO116_I2S_TXD MFP_CFG(GPIO116, AF1) +#define GPIO117_I2S_TXD MFP_CFG(GPIO117, AF2) /* PWM */ #define GPIO96_PWM3_OUT MFP_CFG(GPIO96, AF1) @@ -324,4 +345,10 @@ #define GPIO101_MII_MDIO MFP_CFG(GPIO101, AF5) #define GPIO103_RX_DV MFP_CFG(GPIO103, AF5) +/* SSP2 */ +#define GPIO107_SSP2_RXD MFP_CFG(GPIO107, AF4) +#define GPIO108_SSP2_TXD MFP_CFG(GPIO108, AF4) +#define GPIO111_SSP2_CLK MFP_CFG(GPIO111, AF4) +#define GPIO112_SSP2_FRM MFP_CFG(GPIO112, AF4) + #endif /* __ASM_MACH_MFP_PXA168_H */ -- cgit v0.10.2 From 4c22ea8f449ce837dd84965badca8e10f1f4094f Mon Sep 17 00:00:00 2001 From: Tanmay Upadhyay Date: Thu, 14 Jul 2011 15:07:16 +0530 Subject: ARM: pxa168/gplugd: free correct GPIO Signed-off-by: Tanmay Upadhyay Signed-off-by: Eric Miao diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index 0770e51..98e25d9 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c @@ -162,7 +162,7 @@ static void __init select_disp_freq(void) "frequency\n"); } else { gpio_direction_output(35, 1); - gpio_free(104); + gpio_free(35); } if (unlikely(gpio_request(85, "DISP_FREQ_SEL_2"))) { @@ -170,7 +170,7 @@ static void __init select_disp_freq(void) "frequency\n"); } else { gpio_direction_output(85, 0); - gpio_free(104); + gpio_free(85); } } -- cgit v0.10.2 From 7ce5ae39c46f159aee3b3427844f1491e1ccac74 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 10 Aug 2011 02:36:59 +0800 Subject: ARM: mmp: Also start timer 1 on boot. Currently, arch-mmp/time.c uses timer 0 both as a clocksource timer and as a clockevent timer, the latter by setting up a comparator interrupt to match on 'current_time + delta'. This is problematic if delta is small enough, as that can lead to 'current_time + delta' already being in the past when comparator setup has finished, leading to the requested event not triggering. As there is also a silicon issue that requires stopping a timer's counter while writing to one of its match registers, we'll switch to using two separate timers -- timer 0 as clockevent timer, which we'll start and stop on every invocation of ->set_next_event(), and timer 1 as clocksource timer, which will be free-running. This first patch enables timer 1 on boot, so that we can use it as clocksource timer. 
Signed-off-by: Lennert Buytenhek Acked-by: Haojian Zhuang Signed-off-by: Eric Miao diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 99833b9..09e88c2 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -145,23 +145,26 @@ static struct clocksource cksrc = { static void __init timer_config(void) { uint32_t ccr = __raw_readl(TIMERS_VIRT_BASE + TMR_CCR); - uint32_t cer = __raw_readl(TIMERS_VIRT_BASE + TMR_CER); - uint32_t cmr = __raw_readl(TIMERS_VIRT_BASE + TMR_CMR); - __raw_writel(cer & ~0x1, TIMERS_VIRT_BASE + TMR_CER); /* disable */ + __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_CER); /* disable */ - ccr &= (cpu_is_mmp2()) ? TMR_CCR_CS_0(0) : TMR_CCR_CS_0(3); + ccr &= (cpu_is_mmp2()) ? (TMR_CCR_CS_0(0) | TMR_CCR_CS_1(0)) : + (TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3)); __raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR); /* free-running mode */ - __raw_writel(cmr | 0x01, TIMERS_VIRT_BASE + TMR_CMR); + __raw_writel(0x3, TIMERS_VIRT_BASE + TMR_CMR); __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* free-running */ __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); + __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(1)); /* free-running */ + __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */ + __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1)); + /* enable timer counter */ - __raw_writel(cer | 0x01, TIMERS_VIRT_BASE + TMR_CER); + __raw_writel(0x3, TIMERS_VIRT_BASE + TMR_CER); } static struct irqaction timer_irq = { -- cgit v0.10.2 From 71c0c341403cb141e3580817947e56a4386db4c8 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 10 Aug 2011 02:37:34 +0800 Subject: ARM: mmp: Switch to using timer 1 as clocksource timer. Signed-off-by: Lennert Buytenhek Acked-by: Haojian Zhuang Signed-off-by: Eric Miao diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 09e88c2..c53715e 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -51,12 +51,12 @@ static inline uint32_t timer_read(void) { int delay = 100; - __raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(0)); + __raw_writel(1, TIMERS_VIRT_BASE + TMR_CVWR(1)); while (delay--) cpu_relax(); - return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(0)); + return __raw_readl(TIMERS_VIRT_BASE + TMR_CVWR(1)); } unsigned long long notrace sched_clock(void) -- cgit v0.10.2 From af9dafb1dcf320a46783e09764c758bc4e32ed94 Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Wed, 10 Aug 2011 02:37:55 +0800 Subject: ARM: mmp: Change the way we use timer 0 as clockevent timer. Instead of setting up a match interrupt for 'current_time + delta' on ->set_next_event(), program timer 0 to count down from 'delta - 1' and trigger an interrupt when it reaches zero. Signed-off-by: Lennert Buytenhek Acked-by: Haojian Zhuang Signed-off-by: Eric Miao diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index c53715e..4e91ee6 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -75,28 +75,51 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id) { struct clock_event_device *c = dev_id; - /* disable and clear pending interrupt status */ - __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); - __raw_writel(0x1, TIMERS_VIRT_BASE + TMR_ICR(0)); + /* + * Clear pending interrupt status. + */ + __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); + + /* + * Disable timer 0. 
+ */ + __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); + c->event_handler(c); + return IRQ_HANDLED; } static int timer_set_next_event(unsigned long delta, struct clock_event_device *dev) { - unsigned long flags, next; + unsigned long flags; local_irq_save(flags); - /* clear pending interrupt status and enable */ + /* + * Disable timer 0. + */ + __raw_writel(0x02, TIMERS_VIRT_BASE + TMR_CER); + + /* + * Clear and enable timer match 0 interrupt. + */ __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_ICR(0)); __raw_writel(0x01, TIMERS_VIRT_BASE + TMR_IER(0)); - next = timer_read() + delta; - __raw_writel(next, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0)); + /* + * Setup new clockevent timer value. + */ + __raw_writel(delta - 1, TIMERS_VIRT_BASE + TMR_TN_MM(0, 0)); + + /* + * Enable timer 0. + */ + __raw_writel(0x03, TIMERS_VIRT_BASE + TMR_CER); local_irq_restore(flags); + return 0; } @@ -152,10 +175,10 @@ static void __init timer_config(void) (TMR_CCR_CS_0(3) | TMR_CCR_CS_1(3)); __raw_writel(ccr, TIMERS_VIRT_BASE + TMR_CCR); - /* free-running mode */ - __raw_writel(0x3, TIMERS_VIRT_BASE + TMR_CMR); + /* set timer 0 to periodic mode, and timer 1 to free-running mode */ + __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CMR); - __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* free-running */ + __raw_writel(0x1, TIMERS_VIRT_BASE + TMR_PLCR(0)); /* periodic */ __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(0)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(0)); @@ -163,8 +186,8 @@ static void __init timer_config(void) __raw_writel(0x7, TIMERS_VIRT_BASE + TMR_ICR(1)); /* clear status */ __raw_writel(0x0, TIMERS_VIRT_BASE + TMR_IER(1)); - /* enable timer counter */ - __raw_writel(0x3, TIMERS_VIRT_BASE + TMR_CER); + /* enable timer 1 counter */ + __raw_writel(0x2, TIMERS_VIRT_BASE + TMR_CER); } static struct irqaction timer_irq = { -- cgit v0.10.2 From feb00dceb5af57ce34514ce66096b32d133ded3d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 11 Aug 2011 12:23:22 +0900 Subject: ASoC: Terminate WM8750 SPI device ID table Signed-off-by: Mark Brown Reported-by: Stephen Rothwell diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c index e6f47f4..82ac5fc 100644 --- a/sound/soc/codecs/wm8750.c +++ b/sound/soc/codecs/wm8750.c @@ -781,6 +781,7 @@ static int __devexit wm8750_spi_remove(struct spi_device *spi) static const struct spi_device_id wm8750_spi_ids[] = { { "wm8750", 0 }, { "wm8987", 0 }, + { 0, 0 }, }; MODULE_DEVICE_TABLE(spi, wm8750_spi_ids); -- cgit v0.10.2 From f0e3d0689da401f7d1981c2777a714ba295ea5ff Mon Sep 17 00:00:00 2001 From: Mike Waychison Date: Wed, 10 Aug 2011 21:59:57 -0700 Subject: tcp: initialize variable ecn_ok in syncookies path Using a gcc 4.4.3, warnings are emitted for a possibly uninitialized use of ecn_ok. This can happen if cookie_check_timestamp() returns due to not having seen a timestamp. Defaulting to ecn off seems like a reasonable thing to do in this case, so initialized ecn_ok to false. Signed-off-by: Mike Waychison Signed-off-by: David S. 
Miller diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 92bb943..3bc5c8f 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -276,7 +276,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, int mss; struct rtable *rt; __u8 rcv_wscale; - bool ecn_ok; + bool ecn_ok = false; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 89d5bf8..ac83896 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -165,7 +165,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) int mss; struct dst_entry *dst; __u8 rcv_wscale; - bool ecn_ok; + bool ecn_ok = false; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; -- cgit v0.10.2 From 2f1def2695c223b2aa325e5e47d0d64200a45d23 Mon Sep 17 00:00:00 2001 From: Florian Echtler Date: Tue, 9 Aug 2011 13:37:49 +0200 Subject: USB: Serial: Add device ID for Sierra Wireless MC8305 A new device ID pair is added for Sierra Wireless MC8305. Signed-off-by: Florian Echtler Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 27f9ae4..aeccc7f 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -79,6 +79,7 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE(0x1199, 0x9008)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {USB_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ + {USB_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ {USB_DEVICE(0x16d8, 0x8001)}, /* CMDTech Gobi 2000 QDL device (VU922) */ {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ -- cgit v0.10.2 From ce7e9065958191e6b7ca49d7ed0e1099c486d198 Mon Sep 17 00:00:00 2001 From: Artur Zimmer Date: Wed, 10 Aug 2011 03:51:28 +0200 Subject: USB: Serial: Add PID(0xF7C0) to FTDI SIO driver for a zeitcontrol-device Here is a patch for a new PID (zeitcontrol-device mifare-reader FT232BL(like FT232BM but lead free)). Signed-off-by: Artur Zimmer Signed-off-by: Greg Kroah-Hartman diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 7d42f61..78a2cf9 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -151,6 +151,7 @@ static struct ftdi_sio_quirk ftdi_stmclite_quirk = { * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 
*/ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 19156d1..bf5227a 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1159,4 +1159,8 @@ /* USB-Nano-485*/ #define FTDI_CTI_NANO_PID 0xF60B - +/* + * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de + */ +/* TagTracer MIFARE*/ +#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0 -- cgit v0.10.2 From 8e4bf84474960e832b56293c9b0674c88b5b05ce Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Thu, 11 Aug 2011 10:36:03 +0200 Subject: Move some REQ flags to the common bio/request area REQ_SECURE, REQ_FLUSH and REQ_FUA may all be set on a bio as well as on a request, so relocate them to the shared part of the enum. Signed-off-by: Matthew Wilcox Signed-off-by: Namhyung Kim Reviewed-by: Jeff Moyer Signed-off-by: Jens Axboe diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 6395692..32f0076 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -125,7 +125,11 @@ enum rq_flag_bits { __REQ_SYNC, /* request is sync (sync write or read) */ __REQ_META, /* metadata io request */ __REQ_DISCARD, /* request to discard sectors */ + __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ + __REQ_NOIDLE, /* don't anticipate more IO after this one */ + __REQ_FUA, /* forced unit access */ + __REQ_FLUSH, /* request for cache flush */ /* bio only flags */ __REQ_RAHEAD, /* read ahead, can fail anytime */ @@ -135,7 +139,6 @@ enum rq_flag_bits { /* request only flags */ __REQ_SORTED, /* elevator knows about this request */ __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ - __REQ_FUA, /* forced unit access */ __REQ_NOMERGE, /* don't touch this for merging */ __REQ_STARTED, /* drive already may have started this one */ __REQ_DONTPREP, /* don't call prep for this one */ @@ -146,11 +149,9 @@ enum rq_flag_bits { __REQ_PREEMPT, /* set for "ide_preempt" requests */ __REQ_ALLOCED, /* request came from our alloc pool */ __REQ_COPY_USER, /* contains copies of user pages */ - __REQ_FLUSH, /* request for cache flush */ __REQ_FLUSH_SEQ, /* request for flush sequence */ __REQ_IO_STAT, /* account I/O stat */ __REQ_MIXED_MERGE, /* merge of different types, fail separately */ - __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ __REQ_NR_BITS, /* stops here */ }; -- cgit v0.10.2 From c09c47caedc9854d59378d6e34c989e51cfdd2b4 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 11 Aug 2011 10:36:05 +0200 Subject: blktrace: add FLUSH/FUA support Add FLUSH/FUA support to blktrace. As FLUSH precedes WRITE and/or FUA follows WRITE, use the same 'F' flag for both cases and distinguish them by their (relative) position. The end results look like (other flags might be shown also): - WRITE: W - WRITE_FLUSH: FW - WRITE_FUA: WF - WRITE_FLUSH_FUA: FWF Note that we reuse TC_BARRIER due to lack of bit space of act_mask so that the older versions of blktrace tools will report flush requests as barriers from now on. 
Cc: Steven Rostedt Cc: Frederic Weisbecker Cc: Ingo Molnar Signed-off-by: Namhyung Kim Reviewed-by: Jeff Moyer Signed-off-by: Jens Axboe diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 8c7c2de..8e9e4bc 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -14,7 +14,7 @@ enum blktrace_cat { BLK_TC_READ = 1 << 0, /* reads */ BLK_TC_WRITE = 1 << 1, /* writes */ - BLK_TC_BARRIER = 1 << 2, /* barrier */ + BLK_TC_FLUSH = 1 << 2, /* flush */ BLK_TC_SYNC = 1 << 3, /* sync IO */ BLK_TC_SYNCIO = BLK_TC_SYNC, BLK_TC_QUEUE = 1 << 4, /* queueing/merging */ @@ -28,8 +28,9 @@ enum blktrace_cat { BLK_TC_META = 1 << 12, /* metadata */ BLK_TC_DISCARD = 1 << 13, /* discard requests */ BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */ + BLK_TC_FUA = 1 << 15, /* fua requests */ - BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ + BLK_TC_END = 1 << 15, /* we've run out of bits! */ }; #define BLK_TC_SHIFT (16) diff --git a/include/trace/events/block.h b/include/trace/events/block.h index bf36654..05c5e61 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -8,6 +8,8 @@ #include #include +#define RWBS_LEN 8 + DECLARE_EVENT_CLASS(block_rq_with_error, TP_PROTO(struct request_queue *q, struct request *rq), @@ -19,7 +21,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( int, errors ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN ) __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) ), @@ -104,7 +106,7 @@ DECLARE_EVENT_CLASS(block_rq, __field( sector_t, sector ) __field( unsigned int, nr_sector ) __field( unsigned int, bytes ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) ), @@ -183,7 +185,7 @@ TRACE_EVENT(block_bio_bounce, __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), @@ -222,7 +224,7 @@ TRACE_EVENT(block_bio_complete, __field( sector_t, sector ) __field( unsigned, nr_sector ) __field( int, error ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( @@ -249,7 +251,7 @@ DECLARE_EVENT_CLASS(block_bio, __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), @@ -321,7 +323,7 @@ DECLARE_EVENT_CLASS(block_get_rq, __field( dev_t, dev ) __field( sector_t, sector ) __field( unsigned int, nr_sector ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), @@ -456,7 +458,7 @@ TRACE_EVENT(block_split, __field( dev_t, dev ) __field( sector_t, sector ) __field( sector_t, new_sector ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN ) __array( char, comm, TASK_COMM_LEN ) ), @@ -498,7 +500,7 @@ TRACE_EVENT(block_bio_remap, __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( @@ -542,7 +544,7 @@ TRACE_EVENT(block_rq_remap, __field( unsigned int, nr_sector ) __field( dev_t, old_dev ) __field( sector_t, old_sector ) - __array( char, rwbs, 6 ) + __array( char, rwbs, RWBS_LEN) ), TP_fast_assign( diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 6957aa2..7c910a5 100644 --- 
a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c @@ -206,6 +206,8 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, what |= MASK_TC_BIT(rw, RAHEAD); what |= MASK_TC_BIT(rw, META); what |= MASK_TC_BIT(rw, DISCARD); + what |= MASK_TC_BIT(rw, FLUSH); + what |= MASK_TC_BIT(rw, FUA); pid = tsk->pid; if (act_log_check(bt, what, sector, pid)) @@ -1054,6 +1056,9 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) goto out; } + if (tc & BLK_TC_FLUSH) + rwbs[i++] = 'F'; + if (tc & BLK_TC_DISCARD) rwbs[i++] = 'D'; else if (tc & BLK_TC_WRITE) @@ -1063,10 +1068,10 @@ static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) else rwbs[i++] = 'N'; + if (tc & BLK_TC_FUA) + rwbs[i++] = 'F'; if (tc & BLK_TC_AHEAD) rwbs[i++] = 'A'; - if (tc & BLK_TC_BARRIER) - rwbs[i++] = 'B'; if (tc & BLK_TC_SYNC) rwbs[i++] = 'S'; if (tc & BLK_TC_META) @@ -1132,7 +1137,7 @@ typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); static int blk_log_action_classic(struct trace_iterator *iter, const char *act) { - char rwbs[6]; + char rwbs[RWBS_LEN]; unsigned long long ts = iter->ts; unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC); unsigned secs = (unsigned long)ts; @@ -1148,7 +1153,7 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act) static int blk_log_action(struct trace_iterator *iter, const char *act) { - char rwbs[6]; + char rwbs[RWBS_LEN]; const struct blk_io_trace *t = te_blk_io_trace(iter->ent); fill_rwbs(rwbs, t); @@ -1561,7 +1566,7 @@ static const struct { } mask_maps[] = { { BLK_TC_READ, "read" }, { BLK_TC_WRITE, "write" }, - { BLK_TC_BARRIER, "barrier" }, + { BLK_TC_FLUSH, "flush" }, { BLK_TC_SYNC, "sync" }, { BLK_TC_QUEUE, "queue" }, { BLK_TC_REQUEUE, "requeue" }, @@ -1573,6 +1578,7 @@ static const struct { { BLK_TC_META, "meta" }, { BLK_TC_DISCARD, "discard" }, { BLK_TC_DRV_DATA, "drv_data" }, + { BLK_TC_FUA, "fua" }, }; static int blk_trace_str2mask(const char *str) @@ -1788,6 +1794,9 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) { int i = 0; + if (rw & REQ_FLUSH) + rwbs[i++] = 'F'; + if (rw & WRITE) rwbs[i++] = 'W'; else if (rw & REQ_DISCARD) @@ -1797,6 +1806,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) else rwbs[i++] = 'N'; + if (rw & REQ_FUA) + rwbs[i++] = 'F'; if (rw & REQ_RAHEAD) rwbs[i++] = 'A'; if (rw & REQ_SYNC) -- cgit v0.10.2 From bcf30e75b773b60379338768677a1301ef602ff9 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 11 Aug 2011 10:39:04 +0200 Subject: block: improve rq_affinity placement This patch reverts commit 35ae66e0a09ab70ed(block: Make rq_affinity = 1 work as expected). The purpose is to avoid an unnecessary IPI. Let's take an example. My test box has cpu 0-7, one socket. Say request is added from CPU 1, blk_complete_request() occurs at CPU 7. Without the reverted patch, softirq will be done at CPU 7. With it, an IPI will be directed to CPU 0, and softirq will be done at CPU 0. In this case, doing softirq at CPU 0 and CPU 7 have no difference from cache sharing point view and we can avoid an ipi if doing it in CPU 7. An immediate concern is this is just like QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is running in interrupt handler, and currently I/O controller doesn't support multiple interrupts (I checked several LSI cards and AHCI), so only one CPU can run blk_complete_request(). This is still quite different as QUEUE_FLAG_SAME_FORCE. 
Since only one CPU runs the softirq, the only difference with the patch below is that the softirq does not always run on the first CPU of a group. Signed-off-by: Shaohua Li Signed-off-by: Jens Axboe diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 487addc..58340d0 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -103,7 +103,7 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = { void __blk_complete_request(struct request *req) { - int ccpu, cpu; + int ccpu, cpu, group_cpu = NR_CPUS; struct request_queue *q = req->q; unsigned long flags; @@ -117,12 +117,22 @@ void __blk_complete_request(struct request *req) */ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) { ccpu = req->cpu; - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) { ccpu = blk_cpu_to_group(ccpu); + group_cpu = blk_cpu_to_group(cpu); + } } else ccpu = cpu; - if (ccpu == cpu) { + /* + * If current CPU and requested CPU are in the same group, running + * softirq in current CPU. One might concern this is just like + * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is + * running in interrupt handler, and currently I/O controller doesn't + * support multiple interrupts, so current CPU is unique actually. This + * avoids IPI sending from current CPU to the first CPU of a group. + */ + if (ccpu == cpu || ccpu == group_cpu) { struct list_head *list; do_local: list = &__get_cpu_var(blk_cpu_done); -- cgit v0.10.2 From f57b05ed532ccf3b3e22878a5678ca10de50ad29 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 1 Jun 2011 21:43:46 +0200 Subject: perf report: Use properly build_id kernel binaries If we bring the recorded perf data together with the kernel binary from another machine using: on server A: perf archive on server B: tar xjvf perf.data.tar.bz2 -C ~/.debug the build_id kernel dso is not properly recognized during the "perf report" command on server B. The reason is that build_id dsos are added during the session initialization, while the kernel maps are created during the sample event processing. The machine__create_kernel_maps function ends up creating a new dso object for the kernel, but it does not check if we already have one added by build_id processing. Also the build_id reading ABI quirk added in commit: - commit b25114817a73bbd2b84ce9dba02ee1ef8989a947 perf build-id: Add quirk to deal with perf.data file format breakage populates the "struct build_id_event::pid" with 0, which is later interpreted as DEFAULT_GUEST_KERNEL_ID. This is not always correct, so it's better to guess the pid value based on the "struct build_id_event::header::misc" value. - Tested with data generated on x86 kernel version v2.6.34 and reported back on x86_64 current kernel. - Not tested for the guest kernel case. Note the problem remains for PERF_RECORD_MMAP events recorded by perf that does not use a proper pid (HOST_KERNEL_ID/DEFAULT_GUEST_KERNEL_ID). They are misinterpreted within the current perf code. Probably there's not much we can do about that.
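A quick way to confirm such a mismatch is to compare the build-ids referenced by the recorded data with the ones sitting in the cache; for example (assuming a stock perf install; flags and output format vary between versions):

# on server A: build-ids referenced by the recorded data
perf buildid-list -i perf.data
# on server B: what the unpacked archive placed in the cache
ls ~/.debug/.build-id/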
Cc: Avi Kivity Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Yanmin Zhang Link: http://lkml.kernel.org/r/20110601194346.GB1934@jolsa.brq.redhat.com Signed-off-by: Jiri Olsa Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index d4f3101..b6c1ad1 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -726,7 +726,16 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, return -1; bev.header = old_bev.header; - bev.pid = 0; + + /* + * As the pid is the missing value, we need to fill + * it properly. The header.misc value give us nice hint. + */ + bev.pid = HOST_KERNEL_ID; + if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || + bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) + bev.pid = DEFAULT_GUEST_KERNEL_ID; + memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); __event_process_build_id(&bev, filename, session); diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index a8b5371..e142c21 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2181,27 +2181,22 @@ size_t machines__fprintf_dsos_buildid(struct rb_root *machines, return ret; } -struct dso *dso__new_kernel(const char *name) +static struct dso* +dso__kernel_findnew(struct machine *machine, const char *name, + const char *short_name, int dso_type) { - struct dso *dso = dso__new(name ?: "[kernel.kallsyms]"); - - if (dso != NULL) { - dso__set_short_name(dso, "[kernel]"); - dso->kernel = DSO_TYPE_KERNEL; - } - - return dso; -} + /* + * The kernel dso could be created by build_id processing. + */ + struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name); -static struct dso *dso__new_guest_kernel(struct machine *machine, - const char *name) -{ - char bf[PATH_MAX]; - struct dso *dso = dso__new(name ?: machine__mmap_name(machine, bf, - sizeof(bf))); + /* + * We need to run this in all cases, since during the build_id + * processing we had no idea this was the kernel dso. 
+ */ if (dso != NULL) { - dso__set_short_name(dso, "[guest.kernel]"); - dso->kernel = DSO_TYPE_GUEST_KERNEL; + dso__set_short_name(dso, short_name); + dso->kernel = dso_type; } return dso; @@ -2219,24 +2214,36 @@ void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) dso->has_build_id = true; } -static struct dso *machine__create_kernel(struct machine *machine) +static struct dso *machine__get_kernel(struct machine *machine) { const char *vmlinux_name = NULL; struct dso *kernel; if (machine__is_host(machine)) { vmlinux_name = symbol_conf.vmlinux_name; - kernel = dso__new_kernel(vmlinux_name); + if (!vmlinux_name) + vmlinux_name = "[kernel.kallsyms]"; + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[kernel]", + DSO_TYPE_KERNEL); } else { + char bf[PATH_MAX]; + if (machine__is_default_guest(machine)) vmlinux_name = symbol_conf.default_guest_vmlinux_name; - kernel = dso__new_guest_kernel(machine, vmlinux_name); + if (!vmlinux_name) + vmlinux_name = machine__mmap_name(machine, bf, + sizeof(bf)); + + kernel = dso__kernel_findnew(machine, vmlinux_name, + "[guest.kernel]", + DSO_TYPE_GUEST_KERNEL); } - if (kernel != NULL) { + if (kernel != NULL && (!kernel->has_build_id)) dso__read_running_kernel_build_id(kernel, machine); - dsos__add(&machine->kernel_dsos, kernel); - } + return kernel; } @@ -2340,7 +2347,7 @@ void machine__destroy_kernel_maps(struct machine *machine) int machine__create_kernel_maps(struct machine *machine) { - struct dso *kernel = machine__create_kernel(machine); + struct dso *kernel = machine__get_kernel(machine); if (kernel == NULL || __machine__create_kernel_maps(machine, kernel) < 0) diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 325ee36..4f377d9 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -155,7 +155,6 @@ struct dso { }; struct dso *dso__new(const char *name); -struct dso *dso__new_kernel(const char *name); void dso__delete(struct dso *dso); int dso__name_len(const struct dso *dso); -- cgit v0.10.2 From e33f7a9f37d486f4c6cce5de18a6eea11d68f64f Mon Sep 17 00:00:00 2001 From: Tim Chen Date: Tue, 9 Aug 2011 06:48:32 +0000 Subject: scm: Capture the full credentials of the scm sender This patch corrects an erroneous update of credential's gid with uid introduced in commit 257b5358b32f17 since 2.6.36. Signed-off-by: Tim Chen Acked-by: Eric Dumazet Reviewed-by: James Morris Signed-off-by: David S. Miller diff --git a/net/core/scm.c b/net/core/scm.c index 4c1ef02..811b53f 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -192,7 +192,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) goto error; cred->uid = cred->euid = p->creds.uid; - cred->gid = cred->egid = p->creds.uid; + cred->gid = cred->egid = p->creds.gid; put_cred(p->cred); p->cred = cred; } -- cgit v0.10.2 From 174c95d2526afbf974d9134f7f91fca8c4c8b538 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Wed, 10 Aug 2011 05:18:59 +0000 Subject: slcan: ldisc generated skbs are received in softirq context As this discussion pointed out http://marc.info/?l=linux-netdev&m=131257225602375 netdevices that are based on serial line disciplines should use netif_rx_ni() when pushing received socketbuffers into the netdev rx queue. Following commit 614851601c121b1320a35757ab88292d6272f906 ("slip: fix NOHZ local_softirq_pending 08 warning") this patch updates the slcan driver accordingly. Signed-off-by: Oliver Hartkopp CC: Matvejchikov Ilya CC: Alan Cox Signed-off-by: David S. 
Miller diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index f523f1c..4b70b7e 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -197,7 +197,7 @@ static void slc_bump(struct slcan *sl) skb->ip_summed = CHECKSUM_UNNECESSARY; memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); - netif_rx(skb); + netif_rx_ni(skb); sl->dev->stats.rx_packets++; sl->dev->stats.rx_bytes += cf.can_dlc; -- cgit v0.10.2 From a916d82b1498207fbc04947e2b1ad4845db09c77 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 10 Aug 2011 23:25:42 +0000 Subject: net/irda: sh_irda: add missing header This patch fixup below build error on sh_irda sh_irda.c: In function 'sh_irda_write': sh_irda.c:174: error: implicit declaration of function 'iowrite16' sh_irda.c: In function 'sh_irda_read': sh_irda.c:184: error: implicit declaration of function 'ioread16' sh_irda.c: At top level: sh_irda.c:492: error: expected '=', ',', ';', 'asm' or '__attribute__' before 'sh_irda_irq' sh_irda.c: In function 'sh_irda_probe': sh_irda.c:776: error: implicit declaration of function 'ioremap_nocache' sh_irda.c:776: warning: assignment makes pointer from integer without a cast sh_irda.c:811: error: implicit declaration of function 'request_irq' sh_irda.c:811: error: 'sh_irda_irq' undeclared (first use in this function) sh_irda.c:811: error: (Each undeclared identifier is reported only once sh_irda.c:811: error: for each function it appears in.) sh_irda.c:811: error: 'IRQF_DISABLED' undeclared (first use in this function) sh_irda.c:825: error: implicit declaration of function 'iounmap' Signed-off-by: Kuninori Morimoto Signed-off-by: David S. Miller diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index 4488bd5..8266067 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -22,6 +22,8 @@ * - DMA transfer support * - FIFO mode support */ +#include +#include #include #include #include -- cgit v0.10.2 From 86d0aff0e52cc3954529774c6243514deb705cb5 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 10 Aug 2011 23:26:09 +0000 Subject: net/irda: sh_sir: add missing header This patch fixup below build error on sh_sir sh_sir.c: In function 'sh_sir_write': sh_sir.c:127:2: error: implicit declaration of function 'iowrite16' sh_sir.c: In function 'sh_sir_read': sh_sir.c:132:2: error: implicit declaration of function 'ioread16' sh_sir.c: At top level: sh_sir.c:561:20: error: expected '=', ',', ';', 'asm' or '__attribute__' before 'sh_sir_irq' sh_sir.c: In function 'sh_sir_probe': sh_sir.c:727:2: error: implicit declaration of function 'ioremap_nocache' sh_sir.c:727:16: warning: assignment makes pointer from integer without a cast sh_sir.c:762:2: error: implicit declaration of function 'request_irq' sh_sir.c:762:23: error: 'sh_sir_irq' undeclared (first use in this function) sh_sir.c:762:23: note: each undeclared identifier is reported only once for each function it appears in sh_sir.c:762:35: error: 'IRQF_DISABLED' undeclared (first use in this function) sh_sir.c:776:2: error: implicit declaration of function 'iounmap' sh_sir.c: At top level: sh_sir.c:436:13: warning: 'sh_sir_clear_all_err' defined but not used sh_sir.c:474:12: warning: 'sh_sir_is_which_phase' defined but not used sh_sir.c:490:13: warning: 'sh_sir_tx' defined but not used sh_sir.c:540:13: warning: 'sh_sir_rx' defined but not used Signed-off-by: Kuninori Morimoto Signed-off-by: David S. 
Miller diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 52a7c86..10b13ea 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -12,6 +12,8 @@ * published by the Free Software Foundation. */ +#include +#include #include #include #include -- cgit v0.10.2 From 2eed7982d76f3a1627ba6536128a64b8e66ad189 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 10 Aug 2011 23:26:37 +0000 Subject: net/irda: sh_sir: tidyup compile warning This patch tidyup below warning ${LINUX}/drivers/net/irda/sh_sir.c:514:6: warning: 'val' may be used uninitialized in this function Signed-off-by: Kuninori Morimoto Signed-off-by: David S. Miller diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 10b13ea..ed7d7d6 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -513,7 +513,7 @@ static void sh_sir_tx(struct sh_sir_self *self, int phase) static int sh_sir_read_data(struct sh_sir_self *self) { - u16 val; + u16 val = 0; int timeout = 1024; while (timeout--) { -- cgit v0.10.2 From 94a80d63b245c66745c1d72b8154f67b597e3b89 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 11 Aug 2011 00:06:04 +0000 Subject: net/netlabel/netlabel_kapi.c: add missing cleanup code Call cipso_v4_doi_putdef in the case of the failure of the allocation of entry. Reverse the order of the error handling code at the end of the function and insert more labels in order to reduce the number of unnecessary calls to kfree. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 58107d0..9c24de1 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -341,11 +341,11 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) - return -ENOMEM; + goto out_entry; if (domain != NULL) { entry->domain = kstrdup(domain, GFP_ATOMIC); if (entry->domain == NULL) - goto cfg_cipsov4_map_add_failure; + goto out_domain; } if (addr == NULL && mask == NULL) { @@ -354,13 +354,13 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, } else if (addr != NULL && mask != NULL) { addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); if (addrmap == NULL) - goto cfg_cipsov4_map_add_failure; + goto out_addrmap; INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); if (addrinfo == NULL) - goto cfg_cipsov4_map_add_failure; + goto out_addrinfo; addrinfo->type_def.cipsov4 = doi_def; addrinfo->type = NETLBL_NLTYPE_CIPSOV4; addrinfo->list.addr = addr->s_addr & mask->s_addr; @@ -374,7 +374,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, entry->type = NETLBL_NLTYPE_ADDRSELECT; } else { ret_val = -EINVAL; - goto cfg_cipsov4_map_add_failure; + goto out_addrmap; } ret_val = netlbl_domhsh_add(entry, audit_info); @@ -384,11 +384,15 @@ int netlbl_cfg_cipsov4_map_add(u32 doi, return 0; cfg_cipsov4_map_add_failure: - cipso_v4_doi_putdef(doi_def); + kfree(addrinfo); +out_addrinfo: + kfree(addrmap); +out_addrmap: kfree(entry->domain); +out_domain: kfree(entry); - kfree(addrmap); - kfree(addrinfo); +out_entry: + cipso_v4_doi_putdef(doi_def); return ret_val; } -- cgit v0.10.2 From 5189054dd7ff18576446edc270b6b69fa8285336 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Thu, 11 Aug 2011 01:59:38 +0000 Subject: net/bridge/netfilter/ebtables.c: use available error handling code Free the locally allocated table and newinfo as done in adjacent error handling code. 
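The fix follows the usual kernel goto-unwind idiom, where each later failure jumps to a label that frees everything allocated so far; a minimal sketch of the shape (hypothetical names, not the actual ebtables code):

static int example_register(void)
{
	struct ebt_table *table;
	struct ebt_table_info *newinfo;
	int ret;

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	newinfo = vmalloc(sizeof(*newinfo));
	if (!newinfo) {
		ret = -ENOMEM;
		goto free_table;
	}

	if (table_check(newinfo)) {	/* the path the patch below fixes */
		ret = -EINVAL;
		goto free_newinfo;
	}
	return 0;

free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
	return ret;
}

With a bare return instead of the goto, both allocations would leak, which is exactly what the patch below repairs.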
Signed-off-by: Julia Lawall Signed-off-by: David S. Miller diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 2b5ca1a..5864cc4 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1198,7 +1198,8 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table) if (table->check && table->check(newinfo, table->valid_hooks)) { BUGPRINT("The table doesn't like its own initial data, lol\n"); - return ERR_PTR(-EINVAL); + ret = -EINVAL; + goto free_chainstack; } table->private = newinfo; -- cgit v0.10.2 From 97a804102021431fa6fa33c21c85df762b0f5cb9 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Tue, 9 Aug 2011 04:01:16 +0000 Subject: ipv4: some rt_iif -> rt_route_iif conversions As rt_iif represents input device even for packets coming from loopback with output route, it is not an unique key specific to input routes. Now rt_route_iif has such role, it was fl.iif in 2.6.38, so better to change the checks at some places to save CPU cycles and to restore 2.6.38 semantics. compare_keys: - input routes: only rt_route_iif matters, rt_iif is same - output routes: only rt_oif matters, rt_iif is not used for matching in __ip_route_output_key - now we are back to 2.6.38 state ip_route_input_common: - matching rt_route_iif implies input route - compared to 2.6.38 we eliminated one rth->fl.oif check because it was not needed even for 2.6.38 compare_hash_inputs: Only the change here is not an optimization, it has effect only for output routes. I assume I'm restoring the original intention to ignore oif, it was using fl.iif - now we are back to 2.6.38 state Signed-off-by: Julian Anastasov Signed-off-by: David S. Miller diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cb7efe0..075212e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -722,7 +722,7 @@ static inline bool compare_hash_inputs(const struct rtable *rt1, { return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | - (rt1->rt_iif ^ rt2->rt_iif)) == 0); + (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0); } static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) @@ -732,8 +732,7 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2) (rt1->rt_mark ^ rt2->rt_mark) | (rt1->rt_key_tos ^ rt2->rt_key_tos) | (rt1->rt_route_iif ^ rt2->rt_route_iif) | - (rt1->rt_oif ^ rt2->rt_oif) | - (rt1->rt_iif ^ rt2->rt_iif)) == 0; + (rt1->rt_oif ^ rt2->rt_oif)) == 0; } static inline int compare_netns(struct rtable *rt1, struct rtable *rt2) @@ -2321,9 +2320,8 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, rth = rcu_dereference(rth->dst.rt_next)) { if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) | ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | - (rth->rt_iif ^ iif) | + (rth->rt_route_iif ^ iif) | (rth->rt_key_tos ^ tos)) == 0 && - rt_is_input_route(rth) && rth->rt_mark == skb->mark && net_eq(dev_net(rth->dst.dev), net) && !rt_is_expired(rth)) { -- cgit v0.10.2 From 45d3539a2309858906abd8261c26f8ba8e50405a Mon Sep 17 00:00:00 2001 From: Vladislav Zolotarov Date: Tue, 9 Aug 2011 03:08:09 +0000 Subject: bnx2x: init FCOE FP only once Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index d724a18..64df0ef 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -63,8 +63,9 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index) fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0); #ifdef BCM_CNIC - /* We don't want TPA on FCoE, FWD and OOO L2 rings */ - bnx2x_fcoe(bp, disable_tpa) = 1; + /* We don't want TPA on an FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + fp->disable_tpa = 1; #endif } -- cgit v0.10.2 From cdb9d6ae8d128cc01d7a0649201102cd7de356e0 Mon Sep 17 00:00:00 2001 From: Vladislav Zolotarov Date: Tue, 9 Aug 2011 03:08:55 +0000 Subject: bnx2x: fix select_queue when FCoE is disabled Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 64df0ef..37e5790 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -1405,10 +1405,9 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) { struct bnx2x *bp = netdev_priv(dev); + #ifdef BCM_CNIC - if (NO_FCOE(bp)) - return skb_tx_hash(dev, skb); - else { + if (!NO_FCOE(bp)) { struct ethhdr *hdr = (struct ethhdr *)skb->data; u16 ether_type = ntohs(hdr->h_proto); @@ -1425,8 +1424,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) return bnx2x_fcoe_tx(bp, txq_index); } #endif - /* Select a none-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring - */ + /* select a non-FCoE queue */ return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); } @@ -1449,6 +1447,28 @@ void bnx2x_set_num_queues(struct bnx2x *bp) bp->num_queues += NON_ETH_CONTEXT_USE; } +/** + * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues + * + * @bp: Driver handle + * + * We currently support for at most 16 Tx queues for each CoS thus we will + * allocate a multiple of 16 for ETH L2 rings according to the value of the + * bp->max_cos. + * + * If there is an FCoE L2 queue the appropriate Tx queue will have the next + * index after all ETH L2 indices. + * + * If the actual number of Tx queues (for each CoS) is less than 16 then there + * will be the holes at the end of each group of 16 ETh L2 indices (0..15, + * 16..31,...) with indicies that are not coupled with any real Tx queue. + * + * The proper configuration of skb->queue_mapping is handled by + * bnx2x_select_queue() and __skb_tx_hash(). + * + * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() + * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). + */ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) { int rc, tx, rx; -- cgit v0.10.2 From 2031bd3a8adce1259756e7f142b230c010035995 Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Tue, 9 Aug 2011 03:09:52 +0000 Subject: bnx2x: prevent race between undi_unload and load flows Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Signed-off-by: David S. 
Miller diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 1507091..1f5467f 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -5798,6 +5798,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp)); + /* + * take the UNDI lock to protect undi_unload flow from accessing + * registers while we're resetting the chip + */ + bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + bnx2x_reset_common(bp); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff); @@ -5808,6 +5814,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) } REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI); + bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON); if (!CHIP_IS_E1x(bp)) { -- cgit v0.10.2 From 9f0096a1578bca77b28762c89b29affee69a20f4 Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Tue, 9 Aug 2011 03:10:29 +0000 Subject: bnx2x: properly clean indirect addresses Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 1f5467f..f74582a 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -10259,10 +10259,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, /* clean indirect addresses */ pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); - REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); - REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); + /* Clean the following indirect addresses for all functions since it + * is not used by the driver. + */ + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0); + REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0); /* * Enable internal target-read (in case we are probed after PF FLR). diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 27b5ecb..40266c1 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -3007,11 +3007,27 @@ /* [R 6] Debug only: Number of used entries in the data FIFO */ #define PXP2_REG_HST_DATA_FIFO_STATUS 0x12047c /* [R 7] Debug only: Number of used entries in the header FIFO */ -#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 -#define PXP2_REG_PGL_ADDR_88_F0 0x120534 -#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 -#define PXP2_REG_PGL_ADDR_90_F0 0x12053c -#define PXP2_REG_PGL_ADDR_94_F0 0x120540 +#define PXP2_REG_HST_HEADER_FIFO_STATUS 0x120478 +#define PXP2_REG_PGL_ADDR_88_F0 0x120534 +/* [R 32] GRC address for configuration access to PCIE config address 0x88. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_88_F1 0x120544 +#define PXP2_REG_PGL_ADDR_8C_F0 0x120538 +/* [R 32] GRC address for configuration access to PCIE config address 0x8c. 
+ * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_8C_F1 0x120548 +#define PXP2_REG_PGL_ADDR_90_F0 0x12053c +/* [R 32] GRC address for configuration access to PCIE config address 0x90. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_90_F1 0x12054c +#define PXP2_REG_PGL_ADDR_94_F0 0x120540 +/* [R 32] GRC address for configuration access to PCIE config address 0x94. + * any write to this PCIE address will cause a GRC write access to the + * address that's in t this register */ +#define PXP2_REG_PGL_ADDR_94_F1 0x120550 #define PXP2_REG_PGL_CONTROL0 0x120490 #define PXP2_REG_PGL_CONTROL1 0x120514 #define PXP2_REG_PGL_DEBUG 0x120520 -- cgit v0.10.2 From 7712b644964c36d1216315b791faccc7d5238b16 Mon Sep 17 00:00:00 2001 From: Dmitry Kravkov Date: Tue, 9 Aug 2011 03:11:13 +0000 Subject: bnx2x: disable dcb on 578xx since not supported yet Signed-off-by: Dmitry Kravkov Signed-off-by: Eilon Greenstein Signed-off-by: David S. Miller diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a4ea35f..a1e004a 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c @@ -920,7 +920,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp, void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled) { - if (!CHIP_IS_E1x(bp)) { + if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) { bp->dcb_state = dcb_on; bp->dcbx_enabled = dcbx_enabled; } else { -- cgit v0.10.2 From aa02bc7084501d2edecb0e5b9de56da070db19aa Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 10 Aug 2011 05:23:46 +0000 Subject: PCnet: Fix section mismatch Building MIPS mtx1_defconfig results in: MODPOST 735 modules WARNING: drivers/net/pcnet32.o(.devinit.text+0x11ec): Section mismatch in reference from the function pcnet32_probe_vlbus.constprop.22() to the variable .init.data:pcnet32_portlist The function __devinit pcnet32_probe_vlbus.constprop.22() references a variable __initdata pcnet32_portlist. If pcnet32_portlist is only used by pcnet32_probe_vlbus.constprop.22 then annotate pcnet32_portlist with a matching annotation. Signed-off-by: Ralf Baechle Signed-off-by: David S. Miller diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 8b3090d..80b6f36 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -82,7 +82,7 @@ static int cards_found; /* * VLB I/O addresses */ -static unsigned int pcnet32_portlist[] __initdata = +static unsigned int pcnet32_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 }; static int pcnet32_debug; -- cgit v0.10.2 From 54a33b190aa5386dd214b4ad02986445e20e83d1 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Wed, 10 Aug 2011 18:29:21 -0400 Subject: NFS41: make PNFS_BLOCK selectable PNFS_BLOCK needs BLK_DEV_DM/MD, which is not a dependency for other pnfs layout drivers. Seperate it out so others can still build when BLK_DEV_DM/MD is not enabled. Also change select to depends on to avoid build failures. 
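The Kconfig distinction matters here: select forces a symbol on without checking that symbol's own dependencies, while depends on only makes an option available once its prerequisites are already enabled. Roughly (a generic illustration, not the NFS Kconfig itself):

config FOO
	tristate "Foo feature"
	depends on BAR		# FOO is only offered when BAR is enabled

config BAZ
	tristate
	select QUX		# QUX is forced on, even if QUX's own
				# "depends on" would be unsatisfied

Selecting symbols whose own dependencies may be unmet is what produced the build failures mentioned above; the patch below switches to depends on instead.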
Reported-and-tested-by: Randy Dunlap Signed-off-by: Peng Tao Acked-by: Benny Halevy Signed-off-by: Linus Torvalds diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index be02077..0d30613 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -79,12 +79,9 @@ config NFS_V4_1 depends on NFS_FS && NFS_V4 && EXPERIMENTAL select SUNRPC_BACKCHANNEL select PNFS_FILE_LAYOUT - select PNFS_BLOCK - select MD - select BLK_DEV_DM help This option enables support for minor version 1 of the NFSv4 protocol - (RFC 5661 and RFC 5663) in the kernel's NFS client. + (RFC 5661) in the kernel's NFS client. If unsure, say N. @@ -92,7 +89,14 @@ config PNFS_FILE_LAYOUT tristate config PNFS_BLOCK - tristate + tristate "Provide support for the pNFS Block Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)" + depends on NFS_FS && NFS_V4_1 && BLK_DEV_DM + help + Say M here if you want your pNFS client to support the Block Layout Driver + (RFC 5663). Requires Multiple devices driver support (DM) and Device mapper + support (BLK_DEV_DM). + + If unsure, say N. config PNFS_OBJLAYOUT tristate "Provide support for the pNFS Objects Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)" -- cgit v0.10.2 From a9f729f0e28bb4e4ab0d9e9e3c1675fe4b910f47 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 11 Aug 2011 08:58:41 -0700 Subject: Revert "EDAC: Correct Kconfig dependencies" This reverts commit af9d220bac41dc3201893e1601cc7c44f7da4498. It turns out that one was meant to be applied on top of the edac.git tree in -next that has more i7core_edac changes, but that wasn't clear in the original email. Reported-by: Stephen Rothwell Acked-by: Borislav Petkov Cc: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index c422fea..af1a17d 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -41,7 +41,7 @@ config EDAC_DEBUG config EDAC_DECODE_MCE tristate "Decode MCEs in human-readable form (only on AMD for now)" - depends on CPU_SUP_AMD && X86_MCE_AMD + depends on CPU_SUP_AMD && X86_MCE default y ---help--- Enable this option if you want to decode Machine Check Exceptions @@ -173,7 +173,8 @@ config EDAC_I5400 config EDAC_I7CORE tristate "Intel i7 Core (Nehalem) processors" - depends on EDAC_MM_EDAC && PCI && X86 && X86_MCE_INTEL + depends on EDAC_MM_EDAC && PCI && X86 + select EDAC_MCE help Support for error detection and correction the Intel i7 Core (Nehalem) Integrated Memory Controller that exists on -- cgit v0.10.2 From d16adea3c9d215d98c6fcccc3f91fa8269f91fac Mon Sep 17 00:00:00 2001 From: Tracey Dent Date: Thu, 11 Aug 2011 02:59:00 -0400 Subject: MAINTAINERS: Update linus' git repository Change to new git tree - (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git). Signed-off-by: Tracey Dent Acked-by: WANG Cong Signed-off-by: Linus Torvalds diff --git a/MAINTAINERS b/MAINTAINERS index 1f8267f..1e55e1e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7357,7 +7357,7 @@ THE REST M: Linus Torvalds L: linux-kernel@vger.kernel.org Q: http://patchwork.kernel.org/project/LKML/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git S: Buried alive in reporters F: * F: */ -- cgit v0.10.2 From d9b830fa444c1f4955d0ee88f5af2aa24d2c7837 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 11 Aug 2011 09:19:29 -0700 Subject: Input: mpu3050 - correct call to input_free_device input_free_device() should be used if input_register_device() was not called yet or if it failed. 
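The underlying input API contract, in outline (a sketch of the rule rather than the driver's code):

idev = input_allocate_device();
if (!idev)
	return -ENOMEM;

error = input_register_device(idev);
if (error) {
	/* never registered: free it directly */
	input_free_device(idev);
	return error;
}

/* ... later, on teardown ... */
input_unregister_device(idev);	/* registered: this drops the reference;
				 * do not call input_free_device() after it */

Calling input_unregister_device() on a device that was never successfully registered, as the error path did here, is the bug being fixed.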
Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c index b95fac1..f71dc72 100644 --- a/drivers/input/misc/mpu3050.c +++ b/drivers/input/misc/mpu3050.c @@ -282,7 +282,7 @@ err_free_irq: err_pm_set_suspended: pm_runtime_set_suspended(&client->dev); err_free_mem: - input_unregister_device(idev); + input_free_device(idev); kfree(sensor); return error; } -- cgit v0.10.2 From 22f83205e59c97c2460ad8e4bd6e71268cb2f37f Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 11 Aug 2011 09:22:45 -0700 Subject: Input: tegra-kbc - correct call to input_free_device If kzalloc for kbc fails, then we have NULL pointer dereference while calling input_free_device(kbc->idev) in the error handling. So it is safer to always use the original name, input_dev. Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index f270447..a5a7791 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c @@ -702,7 +702,7 @@ err_iounmap: err_free_mem_region: release_mem_region(res->start, resource_size(res)); err_free_mem: - input_free_device(kbc->idev); + input_free_device(input_dev); kfree(kbc); return err; -- cgit v0.10.2 From 044cd3a574be5cd97ab80d0c6d06f5fab327541d Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Fri, 29 Jul 2011 22:08:07 -0700 Subject: hwmon: (pmbus) Virtualize pmbus_write_byte With virtual pages and to be able to handle more chips, it is necessary to virtualise pmbus_write_byte(). Signed-off-by: Guenter Roeck Reviewed-by: Robert Coulson diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 0808d98..a6ae20f 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -325,6 +325,7 @@ struct pmbus_driver_info { int (*read_word_data)(struct i2c_client *client, int page, int reg); int (*write_word_data)(struct i2c_client *client, int page, int reg, u16 word); + int (*write_byte)(struct i2c_client *client, int page, u8 value); /* * The identify function determines supported PMBus functionality. * This function is only necessary if a chip driver supports multiple diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 5c1b6cf..a561c3a 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -182,6 +182,24 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) } EXPORT_SYMBOL_GPL(pmbus_write_byte); +/* + * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if + * a device specific mapping funcion exists and calls it if necessary. 
+ */ +static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + const struct pmbus_driver_info *info = data->info; + int status; + + if (info->write_byte) { + status = info->write_byte(client, page, value); + if (status != -ENODATA) + return status; + } + return pmbus_write_byte(client, page, value); +} + int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) { int rv; @@ -281,7 +299,7 @@ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg) static void pmbus_clear_fault_page(struct i2c_client *client, int page) { - pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); + _pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); } void pmbus_clear_faults(struct i2c_client *client) -- cgit v0.10.2 From 3a2805e845761ea76a6ad5688d637b2624de0cab Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Fri, 29 Jul 2011 23:05:25 -0700 Subject: hwmon: (pmbus/lm25066) Ignore byte writes to non-zero pages pmbus_clear_faults() attempts to clear faults on non-existing real pages. As a result, the command error bit in the status register is set, and faults are not really cleared. All byte writes to non-zero pages are requests to clear the status register on that page. Since non-zero pages are virtual and do not exist on the chip, there is nothing to do, and such requests have to be ignored. This fixes above problem. Signed-off-by: Guenter Roeck Reviewed-by: Robert Coulson diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c index d4bc114..ac254fb 100644 --- a/drivers/hwmon/pmbus/lm25066.c +++ b/drivers/hwmon/pmbus/lm25066.c @@ -161,6 +161,17 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg, return ret; } +static int lm25066_write_byte(struct i2c_client *client, int page, u8 value) +{ + if (page > 1) + return -EINVAL; + + if (page == 0) + return pmbus_write_byte(client, 0, value); + + return 0; +} + static int lm25066_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -204,6 +215,7 @@ static int lm25066_probe(struct i2c_client *client, info->read_word_data = lm25066_read_word_data; info->write_word_data = lm25066_write_word_data; + info->write_byte = lm25066_write_byte; switch (id->driver_data) { case lm25066: -- cgit v0.10.2 From 66a89b2164e2d30661edbd1953eacf0594d8203a Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 9 Aug 2011 11:10:56 -0400 Subject: hwmon: (ibmaem) add missing kfree rs_resp is dynamically allocated in aem_read_sensor(), so it should be freed before exiting in every case. This collects the kfree and the return at the end of the function. 
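The resulting shape is the single-exit cleanup idiom, schematically (fragment shortened for illustration):

	if (!res) {
		res = -ETIMEDOUT;
		goto out;
	}
	...
	res = 0;
out:
	kfree(rs_resp);
	return res;

With one exit point owning the kfree(), a future early-return path cannot quietly leak the buffer.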
Signed-off-by: Julia Lawall Signed-off-by: Guenter Roeck Cc: stable@kernel.org # 2.6.27+ diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index 1a409c5..c316294 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -432,13 +432,15 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, aem_send_message(ipmi); res = wait_for_completion_timeout(&ipmi->read_complete, IPMI_TIMEOUT); - if (!res) - return -ETIMEDOUT; + if (!res) { + res = -ETIMEDOUT; + goto out; + } if (ipmi->rx_result || ipmi->rx_msg_len != rs_size || memcmp(&rs_resp->id, &system_x_id, sizeof(system_x_id))) { - kfree(rs_resp); - return -ENOENT; + res = -ENOENT; + goto out; } switch (size) { @@ -463,8 +465,11 @@ static int aem_read_sensor(struct aem_data *data, u8 elt, u8 reg, break; } } + res = 0; - return 0; +out: + kfree(rs_resp); + return res; } /* Update AEM energy registers */ -- cgit v0.10.2 From 789e66612367f9975d704c9e4990025cbbbb45ec Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 9 Aug 2011 18:44:44 +0000 Subject: [CIFS] Cleanup use of CONFIG_CIFS_STATS2 ifdef to make transport routines more readable Christoph had requested that the stats related code (in CONFIG_CIFS_STATS2) be moved into helpers to make code flow more readable. This patch should help. For example the following section from transport.c spin_unlock(&GlobalMid_Lock); atomic_inc(&ses->server->num_waiters); wait_event(ses->server->request_q, atomic_read(&ses->server->inFlight) < cifs_max_pending); atomic_dec(&ses->server->num_waiters); spin_lock(&GlobalMid_Lock); becomes simpler (with the patch below): spin_unlock(&GlobalMid_Lock); cifs_num_waiters_inc(server); wait_event(server->request_q, atomic_read(&server->inFlight) < cifs_max_pending); cifs_num_waiters_dec(server); spin_lock(&GlobalMid_Lock); Reviewed-by: Jeff Layton CC: Christoph Hellwig Signed-off-by: Steve French Reviewed-by: Pavel Shilovsky diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 2fe3cf1..6d40656 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -176,7 +176,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) #ifdef CONFIG_CIFS_STATS2 seq_printf(m, " In Send: %d In MaxReq Wait: %d", - atomic_read(&server->inSend), + atomic_read(&server->in_send), atomic_read(&server->num_waiters)); #endif diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 38ce6d4..95dad9d 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -291,7 +291,7 @@ struct TCP_Server_Info { struct fscache_cookie *fscache; /* client index cache cookie */ #endif #ifdef CONFIG_CIFS_STATS2 - atomic_t inSend; /* requests trying to send */ + atomic_t in_send; /* requests trying to send */ atomic_t num_waiters; /* blocked waiting to get in sendrecv */ #endif }; @@ -672,12 +672,54 @@ struct mid_q_entry { bool multiEnd:1; /* both received */ }; -struct oplock_q_entry { - struct list_head qhead; - struct inode *pinode; - struct cifs_tcon *tcon; - __u16 netfid; -}; +/* Make code in transport.c a little cleaner by moving + update of optional stats into function below */ +#ifdef CONFIG_CIFS_STATS2 + +static inline void cifs_in_send_inc(struct TCP_Server_Info *server) +{ + atomic_inc(&server->in_send); +} + +static inline void cifs_in_send_dec(struct TCP_Server_Info *server) +{ + atomic_dec(&server->in_send); +} + +static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server) +{ + atomic_inc(&server->num_waiters); +} + +static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server) +{ + 
atomic_dec(&server->num_waiters); +} + +static inline void cifs_save_when_sent(struct mid_q_entry *mid) +{ + mid->when_sent = jiffies; +} +#else +static inline void cifs_in_send_inc(struct TCP_Server_Info *server) +{ +} +static inline void cifs_in_send_dec(struct TCP_Server_Info *server) +{ +} + +static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server) +{ +} + +static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server) +{ +} + +static inline void cifs_save_when_sent(struct mid_q_entry *mid) +{ +} +#endif /* for pending dnotify requests */ struct dir_notify_req { diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c1b9c4b..10ca6b2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -266,15 +266,11 @@ static int wait_for_free_request(struct TCP_Server_Info *server, while (1) { if (atomic_read(&server->inFlight) >= cifs_max_pending) { spin_unlock(&GlobalMid_Lock); -#ifdef CONFIG_CIFS_STATS2 - atomic_inc(&server->num_waiters); -#endif + cifs_num_waiters_inc(server); wait_event(server->request_q, atomic_read(&server->inFlight) < cifs_max_pending); -#ifdef CONFIG_CIFS_STATS2 - atomic_dec(&server->num_waiters); -#endif + cifs_num_waiters_dec(server); spin_lock(&GlobalMid_Lock); } else { if (server->tcpStatus == CifsExiting) { @@ -381,15 +377,13 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, mid->callback = callback; mid->callback_data = cbdata; mid->midState = MID_REQUEST_SUBMITTED; -#ifdef CONFIG_CIFS_STATS2 - atomic_inc(&server->inSend); -#endif + + cifs_in_send_inc(server); rc = smb_sendv(server, iov, nvec); -#ifdef CONFIG_CIFS_STATS2 - atomic_dec(&server->inSend); - mid->when_sent = jiffies; -#endif + cifs_in_send_dec(server); + cifs_save_when_sent(mid); mutex_unlock(&server->srv_mutex); + if (rc) goto out_err; @@ -575,14 +569,10 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses, } midQ->midState = MID_REQUEST_SUBMITTED; -#ifdef CONFIG_CIFS_STATS2 - atomic_inc(&ses->server->inSend); -#endif + cifs_in_send_inc(ses->server); rc = smb_sendv(ses->server, iov, n_vec); -#ifdef CONFIG_CIFS_STATS2 - atomic_dec(&ses->server->inSend); - midQ->when_sent = jiffies; -#endif + cifs_in_send_dec(ses->server); + cifs_save_when_sent(midQ); mutex_unlock(&ses->server->srv_mutex); @@ -703,14 +693,11 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, } midQ->midState = MID_REQUEST_SUBMITTED; -#ifdef CONFIG_CIFS_STATS2 - atomic_inc(&ses->server->inSend); -#endif + + cifs_in_send_inc(ses->server); rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); -#ifdef CONFIG_CIFS_STATS2 - atomic_dec(&ses->server->inSend); - midQ->when_sent = jiffies; -#endif + cifs_in_send_dec(ses->server); + cifs_save_when_sent(midQ); mutex_unlock(&ses->server->srv_mutex); if (rc < 0) @@ -843,14 +830,10 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, } midQ->midState = MID_REQUEST_SUBMITTED; -#ifdef CONFIG_CIFS_STATS2 - atomic_inc(&ses->server->inSend); -#endif + cifs_in_send_inc(ses->server); rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); -#ifdef CONFIG_CIFS_STATS2 - atomic_dec(&ses->server->inSend); - midQ->when_sent = jiffies; -#endif + cifs_in_send_dec(ses->server); + cifs_save_when_sent(midQ); mutex_unlock(&ses->server->srv_mutex); if (rc < 0) { -- cgit v0.10.2 From e22906c564c2f9c73ee4621ef3b93fe374539f00 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Tue, 9 Aug 2011 14:30:39 -0500 Subject: cifs: Do not set cifs/ntfs acl using a file handle (try #4) Set security 
descriptor using a path name instead of a file handle. We can't be sure that the file handle has adequate permission to set a security descriptor (to modify the DACL). Function set_cifs_acl_by_fid() has been removed since we can't be sure how a file was opened for writing; a valid request can fail if the file was not opened with the two above-mentioned permissions. We could have opted to add WRITE_DAC and WRITE_OWNER permissions to file opens and then use that file handle, but adding additional permissions such as WRITE_DAC and WRITE_OWNER could cause any open to fail. And it was incorrect to look for a read file handle to set a security descriptor anyway. Signed-off-by: Shirish Pargaonkar Signed-off-by: Steve French diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 21de1d6..d0f59fa 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -991,24 +991,6 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, return pntsd; } -static int set_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, __u16 fid, - struct cifs_ntsd *pnntsd, u32 acllen) -{ - int xid, rc; - struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); - - if (IS_ERR(tlink)) - return PTR_ERR(tlink); - - xid = GetXid(); - rc = CIFSSMBSetCIFSACL(xid, tlink_tcon(tlink), fid, pnntsd, acllen); - FreeXid(xid); - cifs_put_tlink(tlink); - - cFYI(DBG2, "SetCIFSACL rc = %d", rc); - return rc; -} - static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, struct cifs_ntsd *pnntsd, u32 acllen) { @@ -1047,18 +1029,10 @@ int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen, struct inode *inode, const char *path) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); - struct cifsFileInfo *open_file; - int rc; cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode); - open_file = find_readable_file(CIFS_I(inode), true); - if (!open_file) - return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen); - - rc = set_cifs_acl_by_fid(cifs_sb, open_file->netfid, pnntsd, acllen); - cifsFileInfo_put(open_file); - return rc; + return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen); } /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */ -- cgit v0.10.2 From 72fa59970f8698023045ab0713d66f3f4f96945c Mon Sep 17 00:00:00 2001 From: Vasiliy Kulikov Date: Mon, 8 Aug 2011 19:02:04 +0400 Subject: move RLIMIT_NPROC check from set_user() to do_execve_common() The patch http://lkml.org/lkml/2003/7/13/226 introduced an RLIMIT_NPROC check in set_user() to check for NPROC exceeding via setuid() and similar functions. Before the check there was a possibility for an unprivileged user to greatly exceed the allowed number of processes if the program relied on the rlimit only. But the check created a new security threat: many poorly written programs simply don't check the setuid() return code and believe it cannot fail if executed with root privileges. So, the check is removed in this patch because of the frequent privilege escalations related to buggy programs. The NPROC limit can still be enforced in the common code flow of daemons spawning user processes. Most daemons do fork()+setuid()+execve(). The check introduced in execve() (1) enforces the same limit as in setuid() and (2) doesn't create similar security issues. Neil Brown suggested tracking which specific process has exceeded the limit by setting the PF_NPROC_EXCEEDED process flag. With the change only this process would fail on execve(), and other processes' execve() behaviour is not changed.
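The failure mode being defended against looks like this (an illustrative userspace fragment, not taken from any particular daemon):

/* typical daemon worker spawn: fork() + setuid() + execve() */
pid = fork();
if (pid == 0) {
	setuid(uid);			/* buggy callers ignore this return value... */
	execve(path, argv, envp);	/* ...so with this patch execve() fails with
					 * -EAGAIN while the limit is still exceeded */
	_exit(1);
}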
Solar Designer suggested to re-check whether NPROC limit is still exceeded at the moment of execve(). If the process was sleeping for days between set*uid() and execve(), and the NPROC counter step down under the limit, the defered execve() failure because NPROC limit was exceeded days ago would be unexpected. If the limit is not exceeded anymore, we clear the flag on successful calls to execve() and fork(). The flag is also cleared on successful calls to set_user() as the limit was exceeded for the previous user, not the current one. Similar check was introduced in -ow patches (without the process flag). v3 - clear PF_NPROC_EXCEEDED on successful calls to set_user(). Reviewed-by: James Morris Signed-off-by: Vasiliy Kulikov Acked-by: NeilBrown Signed-off-by: Linus Torvalds diff --git a/fs/exec.c b/fs/exec.c index da80612..25dcbe5 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1459,6 +1459,23 @@ static int do_execve_common(const char *filename, struct files_struct *displaced; bool clear_in_exec; int retval; + const struct cred *cred = current_cred(); + + /* + * We move the actual failure in case of RLIMIT_NPROC excess from + * set*uid() to execve() because too many poorly written programs + * don't check setuid() return code. Here we additionally recheck + * whether NPROC limit is still exceeded. + */ + if ((current->flags & PF_NPROC_EXCEEDED) && + atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) { + retval = -EAGAIN; + goto out_ret; + } + + /* We're below the limit (still or again), so we don't want to make + * further execve() calls fail. */ + current->flags &= ~PF_NPROC_EXCEEDED; retval = unshare_files(&displaced); if (retval) diff --git a/include/linux/sched.h b/include/linux/sched.h index 20b03bf..4ac2c05 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1767,6 +1767,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_DUMPCORE 0x00000200 /* dumped core */ #define PF_SIGNALED 0x00000400 /* killed by a signal */ #define PF_MEMALLOC 0x00000800 /* Allocating memory */ +#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ diff --git a/kernel/cred.c b/kernel/cred.c index 174fa84..8ef31f5 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -508,10 +508,8 @@ int commit_creds(struct cred *new) key_fsgid_changed(task); /* do it - * - What if a process setreuid()'s and this brings the - * new uid over his NPROC rlimit? We can check this now - * cheaply with the new uid cache, so if it matters - * we should be checking for it. -DaveM + * RLIMIT_NPROC limits on user->processes have already been checked + * in set_user(). 
*/ alter_cred_subscribers(new, 2); if (new->user != old->user) diff --git a/kernel/fork.c b/kernel/fork.c index e7ceaca..8e6b6f4 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1111,6 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->real_cred->user != INIT_USER) goto bad_fork_free; } + current->flags &= ~PF_NPROC_EXCEEDED; retval = copy_creds(p, clone_flags); if (retval < 0) diff --git a/kernel/sys.c b/kernel/sys.c index a101ba3..dd948a1 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -621,11 +621,18 @@ static int set_user(struct cred *new) if (!new_user) return -EAGAIN; + /* + * We don't fail in case of NPROC limit excess here because too many + * poorly written programs don't check set*uid() return code, assuming + * it never fails if called by root. We may still enforce NPROC limit + * for programs doing set*uid()+execve() by harmlessly deferring the + * failure to the execve() stage. + */ if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) && - new_user != INIT_USER) { - free_uid(new_user); - return -EAGAIN; - } + new_user != INIT_USER) + current->flags |= PF_NPROC_EXCEEDED; + else + current->flags &= ~PF_NPROC_EXCEEDED; free_uid(new->user); new->user = new_user; -- cgit v0.10.2 From 4b1bfb7d2d125af6653d6c2305356b2677f79dc6 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 10 Aug 2011 15:32:22 +0200 Subject: rt2x00: fix crash in rt2800usb_write_tx_desc Patch should fix this oops: BUG: unable to handle kernel NULL pointer dereference at 000000a0 IP: [] rt2800usb_write_tx_desc+0x18/0xc0 [rt2800usb] *pdpt = 000000002408c001 *pde = 0000000024079067 *pte = 0000000000000000 Oops: 0000 [#1] SMP EIP: 0060:[] EFLAGS: 00010282 CPU: 0 EIP is at rt2800usb_write_tx_desc+0x18/0xc0 [rt2800usb] EAX: 00000035 EBX: ef2bef10 ECX: 00000000 EDX: d40958a0 ESI: ef1865f8 EDI: ef1865f8 EBP: d4095878 ESP: d409585c DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 Call Trace: [] rt2x00queue_write_tx_frame+0x155/0x300 [rt2x00lib] [] rt2x00mac_tx+0x7c/0x370 [rt2x00lib] [] ? mark_held_locks+0x62/0x90 [] ? _raw_spin_unlock_irqrestore+0x35/0x60 [] ? trace_hardirqs_on_caller+0x5a/0x170 [] ? trace_hardirqs_on+0xb/0x10 [] __ieee80211_tx+0x5c/0x1e0 [mac80211] [] ieee80211_tx+0xbc/0xe0 [mac80211] [] ? ieee80211_tx+0x23/0xe0 [mac80211] [] ieee80211_xmit+0xc1/0x200 [mac80211] [] ? ieee80211_tx+0xe0/0xe0 [mac80211] [] ? lock_release_holdtime+0x35/0x1b0 [] ? ieee80211_subif_start_xmit+0x446/0x5f0 [mac80211] [] ieee80211_subif_start_xmit+0x29d/0x5f0 [mac80211] [] ? ieee80211_subif_start_xmit+0x3e4/0x5f0 [mac80211] [] ? sock_setsockopt+0x6a8/0x6f0 [] ? sock_setsockopt+0x520/0x6f0 [] dev_hard_start_xmit+0x2ef/0x650 The oops can happen because we put new entries into a queue (rt2x00queue_write_tx_frame()) in parallel with removing entries after they finish transmitting (rt2800usb_work_txdone()). There are cases when _txdone may process an entry that was not fully sent and nullify entry->skb. To fix this, check in _txdone whether the entry has flags that indicate a pending transmission, and wait until the flags are cleared. Reported-by: Justin Piszcz Cc: stable@kernel.org Signed-off-by: Stanislaw Gruszka Acked-by: Ivo van Doorn Signed-off-by: John W.
Linville diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 9395631..2cb25ea 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -464,6 +464,15 @@ static bool rt2800usb_txdone_entry_check(struct queue_entry *entry, u32 reg) int wcid, ack, pid; int tx_wcid, tx_ack, tx_pid; + if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || + !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) { + WARNING(entry->queue->rt2x00dev, + "Data pending for entry %u in queue %u\n", + entry->entry_idx, entry->queue->qid); + cond_resched(); + return false; + } + wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); @@ -558,8 +567,10 @@ static void rt2800usb_work_txdone(struct work_struct *work) while (!rt2x00queue_empty(queue)) { entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); - if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) + if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || + !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; + if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); else if (rt2x00queue_status_timeout(entry)) -- cgit v0.10.2 From df71c9cfceea801e7e26e2c74241758ef9c042e5 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 10 Aug 2011 15:32:23 +0200 Subject: rt2x00: fix order of entry flags modification In rt2800usb_work_txdone we check flags in order: - ENTRY_OWNER_DEVICE_DATA - ENTRY_DATA_STATUS_PENDING - ENTRY_DATA_IO_FAILED Modify flags in separate order in rt2x00usb_interrupt_txdone, to avoid processing entries in _txdone with wrong flags or skip processing ready entries. Reported-by: Justin Piszcz Cc: stable@kernel.org Signed-off-by: Stanislaw Gruszka Acked-by: Ivo van Doorn Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index b6b4542..7fbb55c 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -262,23 +262,20 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; - if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) + if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return; - - if (rt2x00dev->ops->lib->tx_dma_done) - rt2x00dev->ops->lib->tx_dma_done(entry); - - /* - * Report the frame as DMA done - */ - rt2x00lib_dmadone(entry); - /* * Check if the frame was correctly uploaded */ if (urb->status) set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); + /* + * Report the frame as DMA done + */ + rt2x00lib_dmadone(entry); + if (rt2x00dev->ops->lib->tx_dma_done) + rt2x00dev->ops->lib->tx_dma_done(entry); /* * Schedule the delayed work for reading the TX status * from the device. 
-- cgit v0.10.2 From 674db1344443204b6ce3293f2df8fd1b7665deea Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Wed, 10 Aug 2011 15:32:24 +0200 Subject: rt2x00: fix crash in rt2800usb_get_txwi Patch should fix this oops: BUG: unable to handle kernel NULL pointer dereference at 000000a0 IP: [] rt2800usb_get_txwi+0x19/0x70 [rt2800usb] *pdpt = 0000000000000000 *pde = f000ff53f000ff53 Oops: 0000 [#1] SMP Pid: 198, comm: kworker/u:3 Tainted: G W 3.0.0-wl+ #9 LENOVO 6369CTO/6369CTO EIP: 0060:[] EFLAGS: 00010283 CPU: 1 EIP is at rt2800usb_get_txwi+0x19/0x70 [rt2800usb] EAX: 00000000 EBX: f465e140 ECX: f4494960 EDX: ef24c5f8 ESI: 810f21f5 EDI: f1da9960 EBP: f4581e80 ESP: f4581e70 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 Process kworker/u:3 (pid: 198, ti=f4580000 task=f4494960 task.ti=f4580000) Call Trace: [] rt2800_txdone_entry+0x2f/0xf0 [rt2800lib] [] ? warn_slowpath_common+0x7d/0xa0 [] ? rt2800usb_work_txdone+0x288/0x360 [rt2800usb] [] ? rt2800usb_work_txdone+0x288/0x360 [rt2800usb] [] rt2800usb_work_txdone+0x263/0x360 [rt2800usb] [] process_one_work+0x186/0x440 [] ? process_one_work+0x10a/0x440 [] ? rt2800usb_probe_hw+0x120/0x120 [rt2800usb] [] worker_thread+0x133/0x310 [] ? trace_hardirqs_on+0xb/0x10 [] ? manage_workers+0x1e0/0x1e0 [] kthread+0x7c/0x90 [] ? __init_kthread_worker+0x60/0x60 [] kernel_thread_helper+0x6/0x1 Oops might happen because we check rt2x00queue_empty(queue) twice, but this condition can change and we can process entry in rt2800_txdone_entry(), which was already processed by rt2800usb_txdone_entry_check() -> rt2x00lib_txdone_noinfo() and has nullify entry->skb . Reported-by: Justin Piszcz Cc: stable@kernel.org Signed-off-by: Stanislaw Gruszka Acked-by: Ivo van Doorn Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 2cb25ea..dbf501c 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -538,12 +538,11 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev) entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); if (rt2800usb_txdone_entry_check(entry, reg)) break; + entry = NULL; } - if (!entry || rt2x00queue_empty(queue)) - break; - - rt2800_txdone_entry(entry, reg); + if (entry) + rt2800_txdone_entry(entry, reg); } } -- cgit v0.10.2 From c407bee8a56d874b91f3e4ee790660959ff1a25e Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 22 Jul 2011 06:21:51 +0000 Subject: e1000e: alternate MAC address does not work on device id 0x1060 This issue is present all the way back to 2.6.34 kernels. CC: Signed-off-by: Bruce Allan Tested-by: Jeffrey Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 7898a67..58af091 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -190,7 +190,8 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) /* Check for LOM (vs. 
NIC) or one of two valid mezzanine cards */ if (!((nvm_data & NVM_COMPAT_LOM) || (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || - (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES))) goto out; ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, -- cgit v0.10.2 From 7f99ae633884043c70f4cc4a03f43dad0f0ecba2 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 22 Jul 2011 06:21:35 +0000 Subject: e1000e: do not disable receiver on 82574/82583 Due to a hardware erratum, the receiver on 82574 and 82583 should not be stopped once it has been started. Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index 480f259..536b3a5 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -2085,7 +2085,8 @@ struct e1000_info e1000_82574_info = { | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_CHECK_PHY_HANG - | FLAG2_DISABLE_ASPM_L0S, + | FLAG2_DISABLE_ASPM_L0S + | FLAG2_NO_DISABLE_RX, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, @@ -2104,7 +2105,8 @@ struct e1000_info e1000_82583_info = { | FLAG_HAS_AMT | FLAG_HAS_JUMBO_FRAMES | FLAG_HAS_CTRLEXT_ON_LOAD, - .flags2 = FLAG2_DISABLE_ASPM_L0S, + .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_NO_DISABLE_RX, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 638d175..35916f4 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -453,6 +453,7 @@ struct e1000_info { #define FLAG2_DISABLE_ASPM_L0S (1 << 7) #define FLAG2_DISABLE_AIM (1 << 8) #define FLAG2_CHECK_PHY_HANG (1 << 9) +#define FLAG2_NO_DISABLE_RX (1 << 10) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 06d88f3..6a0526a 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -1206,7 +1206,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) rx_ring->next_to_clean = 0; rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); ew32(RDBAH, ((u64) rx_ring->dma >> 32)); ew32(RDLEN, rx_ring->size); diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index ab4be80..c0406b1 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -2915,7 +2915,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) /* disable receives while setting up the descriptors */ rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); e1e_flush(); usleep_range(10000, 20000); @@ -3394,7 +3395,8 @@ void e1000e_down(struct e1000_adapter *adapter) /* disable receives in the hardware */ rctl = er32(RCTL); - ew32(RCTL, rctl & ~E1000_RCTL_EN); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ netif_stop_queue(netdev); @@ -3403,6 +3405,7 @@ void e1000e_down(struct e1000_adapter *adapter) tctl = er32(TCTL); tctl &= ~E1000_TCTL_EN; ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ 
e1e_flush(); usleep_range(10000, 20000); -- cgit v0.10.2 From 244735f6ebccbf72a283db89472309f770e14c80 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 29 Jul 2011 05:53:07 +0000 Subject: e1000e: alternate MAC address update If word 0x37 in the EEPROM is 0xFFFF _or_ 0x0000, then there is no alternate MAC address in the EEPROM. Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 58af091..0893ab1 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -201,10 +201,10 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) goto out; } - if (nvm_alt_mac_addr_offset == 0xFFFF) { + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) /* There is no Alternate MAC Address */ goto out; - } if (hw->bus.func == E1000_FUNC_1) nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; -- cgit v0.10.2 From 12440928dca77eccc8a793cf3cd83d017abbd7d6 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 22 Jul 2011 06:22:02 +0000 Subject: e1000e: increase driver version number Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index c0406b1..362f703 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -56,7 +56,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "1.3.16" DRV_EXTRAVERSION +#define DRV_VERSION "1.4.4" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; -- cgit v0.10.2 From d2db60df1e7eb39cf0f378dfc4dd8813666d46ef Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 11 Aug 2011 09:51:46 -0500 Subject: ext3: Properly count journal credits for long symlinks Commit ae54870a1dc9 ("ext3: Fix lock inversion in ext3_symlink()") recalculated the number of credits needed for a long symlink, in the process of splitting it into two transactions. However, the first credit calculation under-counted because if selinux is enabled, credits are needed to create the selinux xattr as well. Overrunning the reservation will result in an OOPS in journal_dirty_metadata() due to this assert: J_ASSERT_JH(jh, handle->h_buffer_credits > 0); Fix this by increasing the reservation size. Signed-off-by: Eric Sandeen Reviewed-by: Jan Kara Acked-by: "Theodore Ts'o" Cc: stable@kernel.org Signed-off-by: Linus Torvalds diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 6e18a0b..5571708 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2209,9 +2209,11 @@ static int ext3_symlink (struct inode * dir, /* * For non-fast symlinks, we just allocate inode and put it on * orphan list in the first transaction => we need bitmap, - * group descriptor, sb, inode block, quota blocks. + * group descriptor, sb, inode block, quota blocks, and + * possibly selinux xattr blocks. */ - credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); + credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + + EXT3_XATTR_TRANS_BLOCKS; } else { /* * Fast symlink. We have to add entry to directory -- cgit v0.10.2 From 8c20871998c082f6fbc963f1449a5ba5140ee39a Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 11 Aug 2011 09:54:31 -0500 Subject: ext4: Properly count journal credits for long symlinks Commit df5e6223407e ("ext4: fix deadlock in ext4_symlink() in ENOSPC conditions") recalculated the number of credits needed for a long symlink, in the process of splitting it into two transactions. 
However, the first credit calculation under-counted because if selinux is enabled, credits are needed to create the selinux xattr as well. Overrunning the reservation will result in an OOPS in jbd2_journal_dirty_metadata() due to this assert: J_ASSERT_JH(jh, handle->h_buffer_credits > 0); Fix this by increasing the reservation size. Signed-off-by: Eric Sandeen Reviewed-by: Jan Kara Acked-by: "Theodore Ts'o" Cc: stable@kernel.org Signed-off-by: Linus Torvalds diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 565a154..f8068c7 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2253,9 +2253,11 @@ static int ext4_symlink(struct inode *dir, /* * For non-fast symlinks, we just allocate inode and put it on * orphan list in the first transaction => we need bitmap, - * group descriptor, sb, inode block, quota blocks. + * group descriptor, sb, inode block, quota blocks, and + * possibly selinux xattr blocks. */ - credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); + credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) + + EXT4_XATTR_TRANS_BLOCKS; } else { /* * Fast symlink. We have to add entry to directory -- cgit v0.10.2 From f09aecd50f39d35372e551491d9f36ff0f51ee4d Mon Sep 17 00:00:00 2001 From: Sangbeom Kim Date: Wed, 20 Jul 2011 17:07:13 +0900 Subject: ASoC: SAMSUNG: Add I2S0 internal dma driver The I2S block in Exynos4 and S5PC110 (S5PV210) has an internal DMA controller. It can be used for low-power audio mode and secondary channel transfers. This patch adds support for this internal DMA (idma). [Reapplied after dependencies propagated through in 3.1-rc1. --broonie] Signed-off-by: Sangbeom Kim Acked-by: Jassi Brar Acked-by: Liam Girdwood Acked-by: Jassi Brar Signed-off-by: Mark Brown diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile index 9eb3b12..8509d3c 100644 --- a/sound/soc/samsung/Makefile +++ b/sound/soc/samsung/Makefile @@ -1,5 +1,6 @@ # S3c24XX Platform Support snd-soc-s3c24xx-objs := dma.o +snd-soc-idma-objs := idma.o snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o snd-soc-ac97-objs := ac97.o @@ -16,6 +17,7 @@ obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o +obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-idma.o # S3C24XX Machine Support snd-soc-jive-wm8750-objs := jive_wm8750.o diff --git a/sound/soc/samsung/idma.c b/sound/soc/samsung/idma.c new file mode 100644 index 0000000..ebde074 --- /dev/null +++ b/sound/soc/samsung/idma.c @@ -0,0 +1,453 @@ +/* + * sound/soc/samsung/idma.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * I2S0's Internal DMA driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ */ +#include +#include +#include +#include +#include +#include +#include + +#include "i2s.h" +#include "idma.h" +#include "dma.h" +#include "i2s-regs.h" + +#define ST_RUNNING (1<<0) +#define ST_OPENED (1<<1) + +static const struct snd_pcm_hardware idma_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_BLOCK_TRANSFER | + SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID | + SNDRV_PCM_INFO_PAUSE | + SNDRV_PCM_INFO_RESUME, + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_U16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_U24_LE | + SNDRV_PCM_FMTBIT_U8 | + SNDRV_PCM_FMTBIT_S8, + .channels_min = 2, + .channels_max = 2, + .buffer_bytes_max = MAX_IDMA_BUFFER, + .period_bytes_min = 128, + .period_bytes_max = MAX_IDMA_PERIOD, + .periods_min = 1, + .periods_max = 2, +}; + +struct idma_ctrl { + spinlock_t lock; + int state; + dma_addr_t start; + dma_addr_t pos; + dma_addr_t end; + dma_addr_t period; + dma_addr_t periodsz; + void *token; + void (*cb)(void *dt, int bytes_xfer); +}; + +static struct idma_info { + spinlock_t lock; + void __iomem *regs; + dma_addr_t lp_tx_addr; +} idma; + +static void idma_getpos(dma_addr_t *src) +{ + *src = idma.lp_tx_addr + + (readl(idma.regs + I2STRNCNT) & 0xffffff) * 4; +} + +static int idma_enqueue(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct idma_ctrl *prtd = substream->runtime->private_data; + u32 val; + + spin_lock(&prtd->lock); + prtd->token = (void *) substream; + spin_unlock(&prtd->lock); + + /* Internal DMA Level0 Interrupt Address */ + val = idma.lp_tx_addr + prtd->periodsz; + writel(val, idma.regs + I2SLVL0ADDR); + + /* Start address0 of I2S internal DMA operation. */ + val = idma.lp_tx_addr; + writel(val, idma.regs + I2SSTR0); + + /* + * Transfer block size for I2S internal DMA. 
+ * Should decide transfer size before start dma operation + */ + val = readl(idma.regs + I2SSIZE); + val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT); + val |= (((runtime->dma_bytes >> 2) & + I2SSIZE_TRNMSK) << I2SSIZE_SHIFT); + writel(val, idma.regs + I2SSIZE); + + val = readl(idma.regs + I2SAHB); + val |= AHB_INTENLVL0; + writel(val, idma.regs + I2SAHB); + + return 0; +} + +static void idma_setcallbk(struct snd_pcm_substream *substream, + void (*cb)(void *, int)) +{ + struct idma_ctrl *prtd = substream->runtime->private_data; + + spin_lock(&prtd->lock); + prtd->cb = cb; + spin_unlock(&prtd->lock); +} + +static void idma_control(int op) +{ + u32 val = readl(idma.regs + I2SAHB); + + spin_lock(&idma.lock); + + switch (op) { + case LPAM_DMA_START: + val |= (AHB_INTENLVL0 | AHB_DMAEN); + break; + case LPAM_DMA_STOP: + val &= ~(AHB_INTENLVL0 | AHB_DMAEN); + break; + default: + spin_unlock(&idma.lock); + return; + } + + writel(val, idma.regs + I2SAHB); + spin_unlock(&idma.lock); +} + +static void idma_done(void *id, int bytes_xfer) +{ + struct snd_pcm_substream *substream = id; + struct idma_ctrl *prtd = substream->runtime->private_data; + + if (prtd && (prtd->state & ST_RUNNING)) + snd_pcm_period_elapsed(substream); +} + +static int idma_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct idma_ctrl *prtd = substream->runtime->private_data; + u32 mod = readl(idma.regs + I2SMOD); + u32 ahb = readl(idma.regs + I2SAHB); + + ahb |= (AHB_DMARLD | AHB_INTMASK); + mod |= MOD_TXS_IDMA; + writel(ahb, idma.regs + I2SAHB); + writel(mod, idma.regs + I2SMOD); + + snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); + runtime->dma_bytes = params_buffer_bytes(params); + + prtd->start = prtd->pos = runtime->dma_addr; + prtd->period = params_periods(params); + prtd->periodsz = params_period_bytes(params); + prtd->end = runtime->dma_addr + runtime->dma_bytes; + + idma_setcallbk(substream, idma_done); + + return 0; +} + +static int idma_hw_free(struct snd_pcm_substream *substream) +{ + snd_pcm_set_runtime_buffer(substream, NULL); + + return 0; +} + +static int idma_prepare(struct snd_pcm_substream *substream) +{ + struct idma_ctrl *prtd = substream->runtime->private_data; + + prtd->pos = prtd->start; + + /* flush the DMA channel */ + idma_control(LPAM_DMA_STOP); + idma_enqueue(substream); + + return 0; +} + +static int idma_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct idma_ctrl *prtd = substream->runtime->private_data; + int ret = 0; + + spin_lock(&prtd->lock); + + switch (cmd) { + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + prtd->state |= ST_RUNNING; + idma_control(LPAM_DMA_START); + break; + + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + prtd->state &= ~ST_RUNNING; + idma_control(LPAM_DMA_STOP); + break; + + default: + ret = -EINVAL; + break; + } + + spin_unlock(&prtd->lock); + + return ret; +} + +static snd_pcm_uframes_t + idma_pointer(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct idma_ctrl *prtd = runtime->private_data; + dma_addr_t src; + unsigned long res; + + spin_lock(&prtd->lock); + + idma_getpos(&src); + res = src - prtd->start; + + spin_unlock(&prtd->lock); + + return bytes_to_frames(substream->runtime, res); +} + +static int idma_mmap(struct snd_pcm_substream *substream, + struct vm_area_struct *vma) +{ + 
struct snd_pcm_runtime *runtime = substream->runtime; + unsigned long size, offset; + int ret; + + /* From snd_pcm_lib_mmap_iomem */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_flags |= VM_IO; + size = vma->vm_end - vma->vm_start; + offset = vma->vm_pgoff << PAGE_SHIFT; + ret = io_remap_pfn_range(vma, vma->vm_start, + (runtime->dma_addr + offset) >> PAGE_SHIFT, + size, vma->vm_page_prot); + + return ret; +} + +static irqreturn_t iis_irq(int irqno, void *dev_id) +{ + struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id; + u32 iiscon, iisahb, val, addr; + + iisahb = readl(idma.regs + I2SAHB); + iiscon = readl(idma.regs + I2SCON); + + val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0; + + if (val) { + iisahb |= val; + writel(iisahb, idma.regs + I2SAHB); + + addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr; + addr += prtd->periodsz; + addr %= (prtd->end - prtd->start); + addr += idma.lp_tx_addr; + + writel(addr, idma.regs + I2SLVL0ADDR); + + if (prtd->cb) + prtd->cb(prtd->token, prtd->period); + } + + return IRQ_HANDLED; +} + +static int idma_open(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct idma_ctrl *prtd; + int ret; + + snd_soc_set_runtime_hwparams(substream, &idma_hardware); + + prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL); + if (prtd == NULL) + return -ENOMEM; + + ret = request_irq(IRQ_I2S0, iis_irq, 0, "i2s", prtd); + if (ret < 0) { + pr_err("fail to claim i2s irq , ret = %d\n", ret); + kfree(prtd); + return ret; + } + + spin_lock_init(&prtd->lock); + + runtime->private_data = prtd; + + return 0; +} + +static int idma_close(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct idma_ctrl *prtd = runtime->private_data; + + free_irq(IRQ_I2S0, prtd); + + if (!prtd) + pr_err("idma_close called with prtd == NULL\n"); + + kfree(prtd); + + return 0; +} + +static struct snd_pcm_ops idma_ops = { + .open = idma_open, + .close = idma_close, + .ioctl = snd_pcm_lib_ioctl, + .trigger = idma_trigger, + .pointer = idma_pointer, + .mmap = idma_mmap, + .hw_params = idma_hw_params, + .hw_free = idma_hw_free, + .prepare = idma_prepare, +}; + +static void idma_free(struct snd_pcm *pcm) +{ + struct snd_pcm_substream *substream; + struct snd_dma_buffer *buf; + + substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; + if (!substream) + return; + + buf = &substream->dma_buffer; + if (!buf->area) + return; + + iounmap(buf->area); + + buf->area = NULL; + buf->addr = 0; +} + +static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream) +{ + struct snd_pcm_substream *substream = pcm->streams[stream].substream; + struct snd_dma_buffer *buf = &substream->dma_buffer; + + buf->dev.dev = pcm->card->dev; + buf->private_data = NULL; + + /* Assign PCM buffer pointers */ + buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS; + buf->addr = idma.lp_tx_addr; + buf->bytes = idma_hardware.buffer_bytes_max; + buf->area = (unsigned char *)ioremap(buf->addr, buf->bytes); + + return 0; +} + +static u64 idma_mask = DMA_BIT_MASK(32); + +static int idma_new(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_card *card = rtd->card->snd_card; + struct snd_soc_dai *dai = rtd->cpu_dai; + struct snd_pcm *pcm = rtd->pcm; + int ret = 0; + + if (!card->dev->dma_mask) + card->dev->dma_mask = &idma_mask; + if (!card->dev->coherent_dma_mask) + card->dev->coherent_dma_mask = DMA_BIT_MASK(32); + + if (dai->driver->playback.channels_min) + ret = preallocate_idma_buffer(pcm, + 
SNDRV_PCM_STREAM_PLAYBACK); + + return ret; +} + +void idma_reg_addr_init(void *regs, dma_addr_t addr) +{ + spin_lock_init(&idma.lock); + idma.regs = regs; + idma.lp_tx_addr = addr; +} + +struct snd_soc_platform_driver asoc_idma_platform = { + .ops = &idma_ops, + .pcm_new = idma_new, + .pcm_free = idma_free, +}; + +static int __devinit asoc_idma_platform_probe(struct platform_device *pdev) +{ + return snd_soc_register_platform(&pdev->dev, &asoc_idma_platform); +} + +static int __devexit asoc_idma_platform_remove(struct platform_device *pdev) +{ + snd_soc_unregister_platform(&pdev->dev); + return 0; +} + +static struct platform_driver asoc_idma_driver = { + .driver = { + .name = "samsung-idma", + .owner = THIS_MODULE, + }, + + .probe = asoc_idma_platform_probe, + .remove = __devexit_p(asoc_idma_platform_remove), +}; + +static int __init asoc_idma_init(void) +{ + return platform_driver_register(&asoc_idma_driver); +} +module_init(asoc_idma_init); + +static void __exit asoc_idma_exit(void) +{ + platform_driver_unregister(&asoc_idma_driver); +} +module_exit(asoc_idma_exit); + +MODULE_AUTHOR("Jaswinder Singh, "); +MODULE_DESCRIPTION("Samsung ASoC IDMA Driver"); +MODULE_LICENSE("GPL"); diff --git a/sound/soc/samsung/idma.h b/sound/soc/samsung/idma.h new file mode 100644 index 0000000..4827321 --- /dev/null +++ b/sound/soc/samsung/idma.h @@ -0,0 +1,26 @@ +/* + * sound/soc/samsung/idma.h + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __SND_SOC_SAMSUNG_IDMA_H_ +#define __SND_SOC_SAMSUNG_IDMA_H_ + +extern void idma_reg_addr_init(void *regs, dma_addr_t addr); + +/* dma_state */ +#define LPAM_DMA_STOP 0 +#define LPAM_DMA_START 1 + +#define MAX_IDMA_PERIOD (128 * 1024) +#define MAX_IDMA_BUFFER (160 * 1024) + +#endif /* __SND_SOC_SAMSUNG_IDMA_H_ */ -- cgit v0.10.2 From 8cf1fb21632d302fad6404f891b002ab8c13b1b4 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 11 Aug 2011 14:29:25 -0700 Subject: pnfs: Automatically select blocks & objects layouts Just like the files layout, the blocks & objects layouts are part of the NFS 4.1 protocol and should be automatically selected if NFS_4_1 is selected. The small problem is that these depend on other kernel support being present, while the files layout depends only on NFS itself. This patch removes the objects and blocks layouts from the user-visible configuration choices, but makes sure they are selected only if the subsystems they depend on are present in the kernel. Signed-off-by: Boaz Harrosh Acked-by: Peng Tao Signed-off-by: Linus Torvalds diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 0d30613..dbcd821 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -89,24 +89,14 @@ config PNFS_FILE_LAYOUT tristate config PNFS_BLOCK - tristate "Provide support for the pNFS Block Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)" + tristate depends on NFS_FS && NFS_V4_1 && BLK_DEV_DM - help - Say M here if you want your pNFS client to support the Block Layout Driver - (RFC 5663). Requires Multiple devices driver support (DM) and Device mapper - support (BLK_DEV_DM). - - If unsure, say N.
+ default m config PNFS_OBJLAYOUT - tristate "Provide support for the pNFS Objects Layout Driver for NFSv4.1 pNFS (EXPERIMENTAL)" + tristate depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD - help - Say M here if you want your pNFS client to support the Objects Layout Driver. - Requires the SCSI osd initiator library (SCSI_OSD_INITIATOR) and - upper level driver (SCSI_OSD_ULD). - - If unsure, say N. + default m config ROOT_NFS bool "Root file system on NFS" -- cgit v0.10.2 From c92761fd9efcbbcb59e7bf4db88e29ce03229889 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 11 Aug 2011 17:58:59 -0700 Subject: sparc: Don't do hypervisor calls on non-sun4v in DS driver. Reported-by: Pieter-Paul Giesberts Signed-off-by: David S. Miller diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 490e541..7429b47 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -1256,13 +1256,14 @@ static int __init ds_init(void) { unsigned long hv_ret, major, minor; - hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); - if (hv_ret == HV_EOK) { - pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", - major, minor); - reboot_data_supported = 1; + if (tlb_type == hypervisor) { + hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); + if (hv_ret == HV_EOK) { + pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", + major, minor); + reboot_data_supported = 1; + } } - kthread_run(ds_thread, NULL, "kldomd"); return vio_register_driver(&ds_driver); -- cgit v0.10.2 From b33f9cbd67ba1a1c46879ec66467269f09cde8e5 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Thu, 11 Aug 2011 11:59:10 -0600 Subject: regmap: Specify a module license CONFIG_REGMAP_I2C/SPI are set to m when selected by a tristate config option that's set to m. The regmap modules don't specify a license, so fail to link to regmap_init at load time, since that is EXPORT_SYMBOL_GPL. Fix this by specifying a license for the regmap modules. Signed-off-by: Stephen Warren Signed-off-by: Mark Brown diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index c2231ff..c4f7a45 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -113,3 +113,4 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c, } EXPORT_SYMBOL_GPL(regmap_init_i2c); +MODULE_LICENSE("GPL"); diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 4deba06..2bbc659 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -70,3 +70,5 @@ struct regmap *regmap_init_spi(struct spi_device *spi, return regmap_init(&spi->dev, &regmap_spi, config); } EXPORT_SYMBOL_GPL(regmap_init_spi); + +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 7ec41ee5ad5f716f67041c0d49014d0becb5332c Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Thu, 11 Aug 2011 15:44:57 +0300 Subject: ASoC: omap: Update e-mail address of Jarkko Nikula My gmail account got disabled and I'm not going to reopen it.
Signed-off-by: Jarkko Nikula Acked-by: Liam Girdwood Signed-off-by: Mark Brown diff --git a/MAINTAINERS b/MAINTAINERS index 51d42fb..46e3e6b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4604,7 +4604,7 @@ F: arch/arm/mach-omap2/clockdomain2xxx_3xxx.c F: arch/arm/mach-omap2/clockdomain44xx.c OMAP AUDIO SUPPORT -M: Jarkko Nikula +M: Jarkko Nikula L: alsa-devel@alsa-project.org (subscribers-only) L: linux-omap@vger.kernel.org S: Maintained diff --git a/include/sound/tlv320aic3x.h b/include/sound/tlv320aic3x.h index 99e0308..ffd9bc7 100644 --- a/include/sound/tlv320aic3x.h +++ b/include/sound/tlv320aic3x.h @@ -1,7 +1,7 @@ /* * Platform data for Texas Instruments TLV320AIC3x codec * - * Author: Jarkko Nikula + * Author: Jarkko Nikula * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c index 83d213b..62e292f 100644 --- a/sound/soc/omap/n810.c +++ b/sound/soc/omap/n810.c @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Nokia Corporation * - * Contact: Jarkko Nikula + * Contact: Jarkko Nikula * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -402,6 +402,6 @@ static void __exit n810_soc_exit(void) module_init(n810_soc_init); module_exit(n810_soc_exit); -MODULE_AUTHOR("Jarkko Nikula "); +MODULE_AUTHOR("Jarkko Nikula "); MODULE_DESCRIPTION("ALSA SoC Nokia N810"); MODULE_LICENSE("GPL"); diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index 07b7723..ebcc2d4 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Nokia Corporation * - * Contact: Jarkko Nikula + * Contact: Jarkko Nikula * Peter Ujfalusi * * This program is free software; you can redistribute it and/or @@ -780,6 +780,6 @@ static void __exit snd_omap_mcbsp_exit(void) } module_exit(snd_omap_mcbsp_exit); -MODULE_AUTHOR("Jarkko Nikula "); +MODULE_AUTHOR("Jarkko Nikula "); MODULE_DESCRIPTION("OMAP I2S SoC Interface"); MODULE_LICENSE("GPL"); diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h index 9a7dedd..65cde9d 100644 --- a/sound/soc/omap/omap-mcbsp.h +++ b/sound/soc/omap/omap-mcbsp.h @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Nokia Corporation * - * Contact: Jarkko Nikula + * Contact: Jarkko Nikula * Peter Ujfalusi * * This program is free software; you can redistribute it and/or diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index b2f5751..9b5c88a 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Nokia Corporation * - * Contact: Jarkko Nikula + * Contact: Jarkko Nikula * Peter Ujfalusi * * This program is free software; you can redistribute it and/or @@ -436,6 +436,6 @@ static void __exit snd_omap_pcm_exit(void) } module_exit(snd_omap_pcm_exit); -MODULE_AUTHOR("Jarkko Nikula "); +MODULE_AUTHOR("Jarkko Nikula "); MODULE_DESCRIPTION("OMAP PCM DMA module"); MODULE_LICENSE("GPL"); diff --git a/sound/soc/omap/omap-pcm.h b/sound/soc/omap/omap-pcm.h index a0ed1db..f95fe30 100644 --- a/sound/soc/omap/omap-pcm.h +++ b/sound/soc/omap/omap-pcm.h @@ -3,7 +3,7 @@ * * Copyright (C) 2008 Nokia Corporation * - * Contact: Jarkko Nikula + * Contact: Jarkko Nikula * Peter Ujfalusi * * This program is free software; you can redistribute it and/or diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c index 0aae998..893300a 100644 --- a/sound/soc/omap/rx51.c +++ 
b/sound/soc/omap/rx51.c @@ -5,7 +5,7 @@ * * Contact: Peter Ujfalusi * Eduardo Valentin - * Jarkko Nikula + * Jarkko Nikula * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License -- cgit v0.10.2 From 73104b5cfe3067d68f2c2de3f3d4d4964c55873e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 9 Aug 2011 17:09:06 +0000 Subject: drm/radeon/kms: don't enable connectors that are off in the hotplug handler If we get a hotplug event on a connector that is off, don't attempt to turn it on or off; it should already be off. Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=728228 Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 6d6b5f1..519b5e2 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -60,6 +60,10 @@ void radeon_connector_hotplug(struct drm_connector *connector) radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); + /* if the connector is already off, don't turn it back on */ + if (connector->dpms != DRM_MODE_DPMS_ON) + return; + /* powering up/down the eDP panel generates hpd events which * can interfere with modesetting. */ -- cgit v0.10.2 From 33ae1827d6c3c79c5957536ec29d5a8780623147 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 11 Aug 2011 14:01:03 +0000 Subject: drm/radeon/kms: fix regression in handling >2 heads on cedar/caicos Need to add support for 4 crtcs when setting the possible crtcs for the encoders. Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index b293487..319d85d 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -2323,6 +2323,9 @@ radeon_add_atom_encoder(struct drm_device *dev, default: encoder->possible_crtcs = 0x3; break; + case 4: + encoder->possible_crtcs = 0xf; + break; case 6: encoder->possible_crtcs = 0x3f; break; -- cgit v0.10.2 From 92bdfd4a35415dd3741b95df60782a32c586d399 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 4 Aug 2011 17:28:40 +0000 Subject: drm/radeon/kms: make some watermark messages debug only Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 14dce9f..fb5fa08 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -743,7 +743,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, !evergreen_average_bandwidth_vs_available_bandwidth(&wm) || !evergreen_check_latency_hiding(&wm) || (rdev->disp_priority == 2)) { - DRM_INFO("force priority to high\n"); + DRM_DEBUG_KMS("force priority to high\n"); priority_a_cnt |= PRIORITY_ALWAYS_ON; priority_b_cnt |= PRIORITY_ALWAYS_ON; } -- cgit v0.10.2 From 13bb9430cd6154d1f088549656c4a3ed10eaf35e Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 8 Aug 2011 16:21:15 +0000 Subject: drm/radeon: Allow panel preferred EDID to override BIOS native mode We have two sources of information about panel capabilities on mobile radeon - the BIOS, which gives us a native mode, and the panel's preferred mode. In theory these two will always match, but there are some corner cases where the BIOS hasn't been fully initialised and so the native mode in it ends up with default values.
However, if we get a panel with reasonable EDID, it's probably the case that the panel's preferred mode does actually represent the panel capabilities. This patch handles that case by replacing the native mode with the panel's preferred mode if the resolutions don't match. Systems without a valid internal panel EDID will still use the BIOS native mode. Signed-off-by: Matthew Garrett Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 519b5e2..441e070 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -478,11 +478,19 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_display_mode *native_mode = &radeon_encoder->native_mode; + struct drm_display_mode *t, *mode; + + /* If the EDID preferred mode doesn't match the native mode, use it */ + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + if (mode->hdisplay != native_mode->hdisplay || + mode->vdisplay != native_mode->vdisplay) + memcpy(native_mode, mode, sizeof(*mode)); + } + } /* Try to get native mode details from EDID if necessary */ if (!native_mode->clock) { - struct drm_display_mode *t, *mode; - list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { if (mode->hdisplay == native_mode->hdisplay && mode->vdisplay == native_mode->vdisplay) { @@ -493,6 +501,7 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, } } } + if (!native_mode->clock) { DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); radeon_encoder->rmx_type = RMX_OFF; -- cgit v0.10.2 From bcc65fd8e929a9d9d34d814d6efc1d2793546922 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Mon, 8 Aug 2011 16:21:16 +0000 Subject: drm/radeon: re-POST the asic on Apple hardware when booted via EFI At least some Apples program the GPU into a state that wedges the engine once userspace starts trying to perform accelerated operations. Executing the Atom init scripts gets the hardware back into a working state. The same hardware works fine when booted via BIOS emulation, so let's just execute the init scripts on Apples when we're using EFI. Signed-off-by: Matthew Garrett Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 440e6ec..a3b011b 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "radeon_reg.h" #include "radeon.h" #include "atom.h" @@ -348,6 +349,9 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; + if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) + return false; + /* first check CRTCs */ if (ASIC_IS_DCE41(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | -- cgit v0.10.2 From e9b52ef2228cd0bed7a4465c693a39489e2c338d Mon Sep 17 00:00:00 2001 From: Vasiliy Kulikov Date: Fri, 12 Aug 2011 00:55:37 +0400 Subject: perf: fix temporary file ownership check A file in /tmp/ might be a symlink, so lstat() should be used instead of stat(). 
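For context, here is a minimal standalone sketch of the kind of ownership check this change hardens (a hypothetical helper, not the actual perf source; it assumes only POSIX lstat()/geteuid()):

#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

/* Trust a /tmp/perf-* map file only if it is owned by root or by the
 * calling user. lstat() inspects the path entry itself; stat() would
 * follow a symlink and report the ownership of its target instead. */
static int tmp_map_is_trusted(const char *name)
{
	struct stat st;

	if (strncmp(name, "/tmp/perf-", 10) != 0)
		return 1; /* not a temporary perf map file */

	if (lstat(name, &st) < 0)
		return 0;

	return st.st_uid == 0 || st.st_uid == geteuid();
}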
Acked-by: Pekka Enberg Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110811205537.GA22864@albatros Signed-off-by: Vasiliy Kulikov Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index e142c21..469c026 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1506,7 +1506,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) if (strncmp(dso->name, "/tmp/perf-", 10) == 0) { struct stat st; - if (stat(dso->name, &st) < 0) + if (lstat(dso->name, &st) < 0) return -1; if (st.st_uid && (st.st_uid != geteuid())) { -- cgit v0.10.2 From 8afa2a707d3d1320df5d35966729ac5262da737d Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:02:29 +0900 Subject: perf probe: Fix a memory leak for scopes array Fix a memory leak for scopes array when it finds a variable in the global scope. Reviewed-by: Pekka Enberg Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110229.19900.63019.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 3e44a3e..573c723 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -660,6 +660,7 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) else { /* Search upper class */ nscopes = dwarf_getscopes_die(sp_die, &scopes); + ret = -ENOENT; while (nscopes-- > 1) { pr_debug("Searching variables in %s\n", dwarf_diename(&scopes[nscopes])); @@ -668,14 +669,12 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) pf->pvar->var, 0, &vr_die)) { ret = convert_variable(&vr_die, pf); - goto found; + break; } } if (scopes) free(scopes); - ret = -ENOENT; } -found: if (ret < 0) pr_warning("Failed to find '%s' in this function.\n", pf->pvar->var); -- cgit v0.10.2 From a128405c6b40371c59c34b00cc66ed06285b9551 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:02:35 +0900 Subject: perf probe: Fix line walker to check CU correctly Fix line walker to check whether a given DIE is CU or not. Actually this function accepts CU, subprogram and inlined_subroutine DIEs. Without this fix, perf probe always fails to analyze lines on inlined functions; $ perf probe -L pre_schedule Debuginfo analysis failed. (-2) Error: Failed to show lines. (-2) This fixes that bug, as below. 
$ perf probe -L pre_schedule 0 static inline void pre_schedule(struct rq *rq, struct task_struct *prev { 2 if (prev->sched_class->pre_schedule) 3 prev->sched_class->pre_schedule(rq, prev); } /* rq->lock is NOT held, but preemption is disabled */ Changes from v1: - Update against current tip tree.(Fix dwarf-aux.c) Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110235.19900.20614.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index fddf40f..d35b454 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -439,7 +439,7 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) /** * die_walk_lines - Walk on lines inside given DIE - * @rt_die: a root DIE (CU or subprogram) + * @rt_die: a root DIE (CU, subprogram or inlined_subroutine) * @callback: callback routine * @data: user data * @@ -460,12 +460,12 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) size_t nlines, i; /* Get the CU die */ - if (dwarf_tag(rt_die) == DW_TAG_subprogram) + if (dwarf_tag(rt_die) != DW_TAG_compile_unit) cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL); else cu_die = rt_die; if (!cu_die) { - pr_debug2("Failed to get CU from subprogram\n"); + pr_debug2("Failed to get CU from given DIE.\n"); return -EINVAL; } -- cgit v0.10.2 From b0e9cb2802d4bf50955cca8a7d87cf94ebf750a5 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:02:41 +0900 Subject: perf probe: Fix to search nested inlined functions in CU Fix perf probe to walk through the lines of all nested inlined function call sites and declared lines when a whole CU is passed to the line walker. The die_walk_lines() can have two different type of DIEs, subprogram (or inlined-subroutine) DIE and CU DIE. If a caller passes a subprogram DIE, this means that the walker walk on lines of given subprogram. In this case, it just needs to search on direct children of DIE tree for finding call-site information of inlined function which directly called from given subprogram. On the other hand, if a caller passes a CU DIE to the walker, this means that the walker have to walk on all lines in the source files included in given CU DIE. In this case, it has to search whole DIE trees of all subprograms to find the call-site information of all nested inlined functions. Without this patch: $ perf probe --line kernel/cpu.c:151-157 static int cpu_notify(unsigned long val, void *v) { 154 return __cpu_notify(val, v, -1, NULL); } With this: $ perf probe --line kernel/cpu.c:151-157 152 static int cpu_notify(unsigned long val, void *v) { 154 return __cpu_notify(val, v, -1, NULL); } As you can see, --line option with source line range shows the declared lines as probe-able. 
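As a rough standalone illustration of the corrected check (a sketch assuming the elfutils libdw API; the wrapper name is hypothetical and not part of the patch):

#include <dwarf.h>
#include <elfutils/libdw.h>

/* die_walk_lines() accepts a CU, subprogram or inlined_subroutine DIE
 * as its root. Anything that is not already a CU DIE must be mapped
 * back to its containing CU before the line table is read, so the tag
 * has to be compared against DW_TAG_compile_unit rather than just
 * DW_TAG_subprogram. */
static Dwarf_Die *root_die_to_cu(Dwarf_Die *rt_die, Dwarf_Die *die_mem)
{
	if (dwarf_tag(rt_die) != DW_TAG_compile_unit)
		return dwarf_diecu(rt_die, die_mem, NULL, NULL);
	return rt_die;
}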
Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110241.19900.34994.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index d35b454..d9b8ad0 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -198,6 +198,19 @@ static int die_get_attr_udata(Dwarf_Die *tp_die, unsigned int attr_name, return 0; } +/* Get attribute and translate it as a sdata */ +static int die_get_attr_sdata(Dwarf_Die *tp_die, unsigned int attr_name, + Dwarf_Sword *result) +{ + Dwarf_Attribute attr; + + if (dwarf_attr(tp_die, attr_name, &attr) == NULL || + dwarf_formsdata(&attr, result) != 0) + return -ENOENT; + + return 0; +} + /** * die_is_signed_type - Check whether a type DIE is signed or not * @tp_die: a DIE of a type @@ -250,6 +263,39 @@ int die_get_data_member_location(Dwarf_Die *mb_die, Dwarf_Word *offs) return 0; } +/* Get the call file index number in CU DIE */ +static int die_get_call_fileno(Dwarf_Die *in_die) +{ + Dwarf_Sword idx; + + if (die_get_attr_sdata(in_die, DW_AT_call_file, &idx) == 0) + return (int)idx; + else + return -ENOENT; +} + +/** + * die_get_call_file - Get callsite file name of inlined function instance + * @in_die: a DIE of an inlined function instance + * + * Get call-site file name of @in_die. This means from which file the inline + * function is called. + */ +const char *die_get_call_file(Dwarf_Die *in_die) +{ + Dwarf_Die cu_die; + Dwarf_Files *files; + int idx; + + idx = die_get_call_fileno(in_die); + if (idx < 0 || !dwarf_diecu(in_die, &cu_die, NULL, NULL) || + dwarf_getsrcfiles(&cu_die, &files, NULL) != 0) + return NULL; + + return dwarf_filesrc(files, idx, NULL, NULL); +} + + /** * die_find_child - Generic DIE search function in DIE tree * @rt_die: a root DIE @@ -376,7 +422,7 @@ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, /* Line walker internal parameters */ struct __line_walk_param { - const char *fname; + bool recursive; line_walk_callback_t callback; void *data; int retval; @@ -385,39 +431,56 @@ struct __line_walk_param { static int __die_walk_funclines_cb(Dwarf_Die *in_die, void *data) { struct __line_walk_param *lw = data; - Dwarf_Addr addr; + Dwarf_Addr addr = 0; + const char *fname; int lineno; if (dwarf_tag(in_die) == DW_TAG_inlined_subroutine) { + fname = die_get_call_file(in_die); lineno = die_get_call_lineno(in_die); - if (lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { - lw->retval = lw->callback(lw->fname, lineno, addr, - lw->data); + if (fname && lineno > 0 && dwarf_entrypc(in_die, &addr) == 0) { + lw->retval = lw->callback(fname, lineno, addr, lw->data); if (lw->retval != 0) return DIE_FIND_CB_END; } } - return DIE_FIND_CB_SIBLING; + if (!lw->recursive) + /* Don't need to search recursively */ + return DIE_FIND_CB_SIBLING; + + if (addr) { + fname = dwarf_decl_file(in_die); + if (fname && dwarf_decl_line(in_die, &lineno) == 0) { + lw->retval = lw->callback(fname, lineno, addr, lw->data); + if (lw->retval != 0) + return DIE_FIND_CB_END; + } + } + + /* Continue to search nested inlined function call-sites */ + return DIE_FIND_CB_CONTINUE; } /* Walk on lines of blocks included in given DIE */ -static int __die_walk_funclines(Dwarf_Die *sp_die, +static int __die_walk_funclines(Dwarf_Die *sp_die, bool recursive, line_walk_callback_t callback, void *data) { struct __line_walk_param lw = { + .recursive = 
recursive, .callback = callback, .data = data, .retval = 0, }; Dwarf_Die die_mem; Dwarf_Addr addr; + const char *fname; int lineno; /* Handle function declaration line */ - lw.fname = dwarf_decl_file(sp_die); - if (lw.fname && dwarf_decl_line(sp_die, &lineno) == 0 && + fname = dwarf_decl_file(sp_die); + if (fname && dwarf_decl_line(sp_die, &lineno) == 0 && dwarf_entrypc(sp_die, &addr) == 0) { - lw.retval = callback(lw.fname, lineno, addr, data); + lw.retval = callback(fname, lineno, addr, data); if (lw.retval != 0) goto done; } @@ -430,7 +493,7 @@ static int __die_walk_culines_cb(Dwarf_Die *sp_die, void *data) { struct __line_walk_param *lw = data; - lw->retval = __die_walk_funclines(sp_die, lw->callback, lw->data); + lw->retval = __die_walk_funclines(sp_die, true, lw->callback, lw->data); if (lw->retval != 0) return DWARF_CB_ABORT; @@ -509,7 +572,11 @@ int die_walk_lines(Dwarf_Die *rt_die, line_walk_callback_t callback, void *data) * subroutines. We have to check functions list or given function. */ if (rt_die != cu_die) - ret = __die_walk_funclines(rt_die, callback, data); + /* + * Don't need walk functions recursively, because nested + * inlined functions don't have lines of the specified DIE. + */ + ret = __die_walk_funclines(rt_die, false, callback, data); else { struct __line_walk_param param = { .callback = callback, diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index bc3b211..c8e491b 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -40,6 +40,9 @@ extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); /* Get callsite line number of inline-function instance */ extern int die_get_call_lineno(Dwarf_Die *in_die); +/* Get callsite file name of inlined function instance */ +extern const char *die_get_call_file(Dwarf_Die *in_die); + /* Get type die */ extern Dwarf_Die *die_get_type(Dwarf_Die *vr_die, Dwarf_Die *die_mem); -- cgit v0.10.2 From 36c0c588b9ea979b619d6ddced410f9551e4c5fa Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:02:47 +0900 Subject: perf probe: Fix to walk all inline instances Fix the line-range collector to walk all instances of an inlined function, because some execution paths can be optimized out depending on the argument passed to each instance. E.g.) inline_func (arg) { if (arg) do_something; else do_another; } func_A() { inline_func(1) } func_B() { inline_func(0) } In this case, func_A may have only the do_something code and func_B only the do_another code.
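To make that scenario concrete, here is a compilable version of the example from the commit message (do_something()/do_another() are assumed to be defined elsewhere):

extern void do_something(void);
extern void do_another(void);

/* With a constant argument the compiler can fold the branch away in
 * each inlined copy: func_A's instance keeps only the do_something()
 * line and func_B's keeps only the do_another() line. A line-range
 * walker therefore has to visit every instance (DWARF_CB_OK) instead
 * of stopping at the first one (DWARF_CB_ABORT). */
static inline void inline_func(int arg)
{
	if (arg)
		do_something();
	else
		do_another();
}

void func_A(void) { inline_func(1); }
void func_B(void) { inline_func(0); }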
Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Masami Hiramatsu Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110247.19900.93702.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 573c723..d6d5768 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1393,7 +1393,13 @@ static int line_range_inline_cb(Dwarf_Die *in_die, void *data) struct dwarf_callback_param *param = data; param->retval = find_line_range_by_line(in_die, param->data); - return DWARF_CB_ABORT; /* No need to find other instances */ + + /* + * We have to check all instances of inlined function, because + * some execution paths can be optimized out depends on the + * function argument of instances + */ + return DWARF_CB_OK; } /* Search function from function name */ -- cgit v0.10.2 From 13e27d7686c457c625242fc2c20be30eef942408 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:02:53 +0900 Subject: perf probe: Warn when more than one line is given Check for multiple --line options and print a warning stating that only the first specified --line option is valid. Changes from the 1st post: - Accept only the first option instead of the last. - Fix warning message according to David's comment. - Mark as a bugfix. Cc: David Ahern Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110253.19900.96192.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 5f2a5c7..710ae3d 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -134,10 +134,18 @@ static int opt_show_lines(const struct option *opt __used, { int ret = 0; - if (str) - ret = parse_line_range_desc(str, &params.line_range); - INIT_LIST_HEAD(&params.line_range.line_list); + if (!str) + return 0; + + if (params.show_lines) { + pr_warning("Warning: more than one --line options are" + " detected. Only the first one is valid.\n"); + return 0; + } + + params.show_lines = true; + ret = parse_line_range_desc(str, &params.line_range); + INIT_LIST_HEAD(&params.line_range.line_list); return ret; } -- cgit v0.10.2 From 221d061182b8ff5507d5768aeeecbc74f01c5dfa Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:02:59 +0900 Subject: perf probe: Fix to search local variables in appropriate scope Fix perf probe to search for local variables in the appropriate local scope of the inlined function. For example, pre_schedule() has only 2 local variables, as below; $ perf probe -L pre_schedule 0 static inline void pre_schedule(struct rq *rq, struct task_struct *prev) { 2 if (prev->sched_class->pre_schedule) 3 prev->sched_class->pre_schedule(rq, prev); } However, the current perf probe shows 4 local variables for pre_schedule(), because it searches for variables in the caller's (schedule()) scope. $ perf probe -V pre_schedule Available variables at pre_schedule @ int cpu long unsigned int* switch_count struct rq* rq struct task_struct* prev This patch fixes this issue by searching for variables in the local scope of the inlined function instance. Here is the result.
$ perf probe -V pre_schedule Available variables at pre_schedule @ struct rq* rq struct task_struct* prev Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110259.19900.85664.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index d9b8ad0..425703a 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -96,6 +96,39 @@ int cu_find_lineinfo(Dwarf_Die *cu_die, unsigned long addr, return *lineno ?: -ENOENT; } +static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data); + +/** + * cu_walk_functions_at - Walk on function DIEs at given address + * @cu_die: A CU DIE + * @addr: An address + * @callback: A callback which called with found DIEs + * @data: A user data + * + * Walk on function DIEs at given @addr in @cu_die. Passed DIEs + * should be subprogram or inlined-subroutines. + */ +int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, + int (*callback)(Dwarf_Die *, void *), void *data) +{ + Dwarf_Die die_mem; + Dwarf_Die *sc_die; + int ret = -ENOENT; + + /* Inlined function could be recursive. Trace it until fail */ + for (sc_die = die_find_realfunc(cu_die, addr, &die_mem); + sc_die != NULL; + sc_die = die_find_child(sc_die, __die_find_inline_cb, &addr, + &die_mem)) { + ret = callback(sc_die, data); + if (ret) + break; + } + + return ret; + +} + /** * die_compare_name - Compare diename and tname * @dw_die: a DIE diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index c8e491b..6f46106 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -34,6 +34,10 @@ extern const char *cu_get_comp_dir(Dwarf_Die *cu_die); extern int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr, const char **fname, int *lineno); +/* Walk on funcitons at given address */ +extern int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr, + int (*callback)(Dwarf_Die *, void *), void *data); + /* Compare diename and tname */ extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index d6d5768..5c83b7d 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -612,8 +612,8 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) return ret; } -/* Find a variable in a subprogram die */ -static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) +/* Find a variable in a scope DIE */ +static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) { Dwarf_Die vr_die, *scopes; char buf[32], *ptr; @@ -655,11 +655,11 @@ static int find_variable(Dwarf_Die *sp_die, struct probe_finder *pf) pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); /* Search child die for local variables and parameters. 
*/ - if (die_find_variable_at(sp_die, pf->pvar->var, pf->addr, &vr_die)) + if (die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) ret = convert_variable(&vr_die, pf); else { /* Search upper class */ - nscopes = dwarf_getscopes_die(sp_die, &scopes); + nscopes = dwarf_getscopes_die(sc_die, &scopes); ret = -ENOENT; while (nscopes-- > 1) { pr_debug("Searching variables in %s\n", @@ -717,26 +717,30 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwarf_Addr paddr, return 0; } -/* Call probe_finder callback with real subprogram DIE */ -static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) +/* Call probe_finder callback with scope DIE */ +static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) { - Dwarf_Die die_mem; Dwarf_Attribute fb_attr; size_t nops; int ret; - /* If no real subprogram, find a real one */ - if (!sp_die || dwarf_tag(sp_die) != DW_TAG_subprogram) { - sp_die = die_find_realfunc(&pf->cu_die, pf->addr, &die_mem); - if (!sp_die) { + if (!sc_die) { + pr_err("Caller must pass a scope DIE. Program error.\n"); + return -EINVAL; + } + + /* If not a real subprogram, find a real one */ + if (dwarf_tag(sc_die) != DW_TAG_subprogram) { + if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { pr_warning("Failed to find probe point in any " "functions.\n"); return -ENOENT; } - } + } else + memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die)); - /* Get the frame base attribute/ops */ - dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr); + /* Get the frame base attribute/ops from subprogram */ + dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr); ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); if (ret <= 0 || nops == 0) { pf->fb_ops = NULL; @@ -754,7 +758,7 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) } /* Call finder's callback handler */ - ret = pf->callback(sp_die, pf); + ret = pf->callback(sc_die, pf); /* *pf->fb_ops will be cached in libdw. Don't free it. */ pf->fb_ops = NULL; @@ -762,17 +766,82 @@ static int call_probe_finder(Dwarf_Die *sp_die, struct probe_finder *pf) return ret; } +struct find_scope_param { + const char *function; + const char *file; + int line; + int diff; + Dwarf_Die *die_mem; + bool found; +}; + +static int find_best_scope_cb(Dwarf_Die *fn_die, void *data) +{ + struct find_scope_param *fsp = data; + const char *file; + int lno; + + /* Skip if declared file name does not match */ + if (fsp->file) { + file = dwarf_decl_file(fn_die); + if (!file || strcmp(fsp->file, file) != 0) + return 0; + } + /* If the function name is given, that's what user expects */ + if (fsp->function) { + if (die_compare_name(fn_die, fsp->function)) { + memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); + fsp->found = true; + return 1; + } + } else { + /* With the line number, find the nearest declared DIE */ + dwarf_decl_line(fn_die, &lno); + if (lno < fsp->line && fsp->diff > fsp->line - lno) { + /* Keep a candidate and continue */ + fsp->diff = fsp->line - lno; + memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die)); + fsp->found = true; + } + } + return 0; +} + +/* Find an appropriate scope fits to given conditions */ +static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem) +{ + struct find_scope_param fsp = { + .function = pf->pev->point.function, + .file = pf->fname, + .line = pf->lno, + .diff = INT_MAX, + .die_mem = die_mem, + .found = false, + }; + + cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp); + + return fsp.found ? 
die_mem : NULL; +} + static int probe_point_line_walker(const char *fname, int lineno, Dwarf_Addr addr, void *data) { struct probe_finder *pf = data; + Dwarf_Die *sc_die, die_mem; int ret; if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) return 0; pf->addr = addr; - ret = call_probe_finder(NULL, pf); + sc_die = find_best_scope(pf, &die_mem); + if (!sc_die) { + pr_warning("Failed to find scope of probe point.\n"); + return -ENOENT; + } + + ret = call_probe_finder(sc_die, pf); /* Continue if no error, because the line will be in inline function */ return ret < 0 ? ret : 0; @@ -826,6 +895,7 @@ static int probe_point_lazy_walker(const char *fname, int lineno, Dwarf_Addr addr, void *data) { struct probe_finder *pf = data; + Dwarf_Die *sc_die, die_mem; int ret; if (!line_list__has_line(&pf->lcache, lineno) || @@ -835,7 +905,14 @@ static int probe_point_lazy_walker(const char *fname, int lineno, pr_debug("Probe line found: line:%d addr:0x%llx\n", lineno, (unsigned long long)addr); pf->addr = addr; - ret = call_probe_finder(NULL, pf); + pf->lno = lineno; + sc_die = find_best_scope(pf, &die_mem); + if (!sc_die) { + pr_warning("Failed to find scope of probe point.\n"); + return -ENOENT; + } + + ret = call_probe_finder(sc_die, pf); /* * Continue if no error, because the lazy pattern will match @@ -1059,7 +1136,7 @@ found: } /* Add a found probe point into trace event list */ -static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) +static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) { struct trace_event_finder *tf = container_of(pf, struct trace_event_finder, pf); @@ -1074,8 +1151,9 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) } tev = &tf->tevs[tf->ntevs++]; - ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, - &tev->point); + /* Trace point should be converted from subprogram DIE */ + ret = convert_to_trace_point(&pf->sp_die, pf->addr, + pf->pev->point.retprobe, &tev->point); if (ret < 0) return ret; @@ -1090,7 +1168,8 @@ static int add_probe_trace_event(Dwarf_Die *sp_die, struct probe_finder *pf) for (i = 0; i < pf->pev->nargs; i++) { pf->pvar = &pf->pev->args[i]; pf->tvar = &tev->args[i]; - ret = find_variable(sp_die, pf); + /* Variable should be found from scope DIE */ + ret = find_variable(sc_die, pf); if (ret != 0) return ret; } @@ -1158,7 +1237,7 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data) } /* Add a found vars into available variables list */ -static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) +static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) { struct available_var_finder *af = container_of(pf, struct available_var_finder, pf); @@ -1173,8 +1252,9 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) } vl = &af->vls[af->nvls++]; - ret = convert_to_trace_point(sp_die, pf->addr, pf->pev->point.retprobe, - &vl->point); + /* Trace point should be converted from subprogram DIE */ + ret = convert_to_trace_point(&pf->sp_die, pf->addr, + pf->pev->point.retprobe, &vl->point); if (ret < 0) return ret; @@ -1186,14 +1266,14 @@ static int add_available_vars(Dwarf_Die *sp_die, struct probe_finder *pf) if (vl->vars == NULL) return -ENOMEM; af->child = true; - die_find_child(sp_die, collect_variables_cb, (void *)af, &die_mem); + die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem); /* Find external variables */ if (!af->externs) goto out; /* Don't need to search child DIE for externs. 
*/ af->child = false; - nscopes = dwarf_getscopes_die(sp_die, &scopes); + nscopes = dwarf_getscopes_die(sc_die, &scopes); while (nscopes-- > 1) die_find_child(&scopes[nscopes], collect_variables_cb, (void *)af, &die_mem); diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index c478b42..1132c8f 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -57,7 +57,7 @@ struct probe_finder { struct perf_probe_event *pev; /* Target probe event */ /* Callback when a probe point is found */ - int (*callback)(Dwarf_Die *sp_die, struct probe_finder *pf); + int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf); /* For function searching */ int lno; /* Line number */ -- cgit v0.10.2 From f182e3e13ca71b64b40fab1aef31fa6a78271648 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:03:05 +0900 Subject: perf probe: Avoid searching variables in intermediate scopes Fix variable searching logic to search one in inner than local scope or global(CU) scope. In the other words, skip searching in intermediate scopes. e.g., in the following code, int var1; void inline infunc(int i) { i++; <--- [A] } void func(void) { int var1, var2; infunc(var2); } At [A], "var1" should point the global variable "var1", however, if user mis-typed as "var2", variable search should be failed. However, current logic searches variable infunc() scope, global scope, and then func() scope. Thus, it can find "var2" variable in func() scope. This may not be what user expects. So, it would better not search outer scopes except outermost (compile unit) scope which contains only global variables, when it failed to find given variable in local scope. E.g. Without this: $ perf probe -V pre_schedule --externs > without.vars With this: $ perf probe -V pre_schedule --externs > with.vars Check the diff: $ diff without.vars with.vars 88d87 < int cpu 133d131 < long unsigned int* switch_count These vars are actually in the scope of schedule(), the caller of pre_schedule(). Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110305.19900.94374.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 5c83b7d..114542a 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -615,9 +615,9 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) /* Find a variable in a scope DIE */ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) { - Dwarf_Die vr_die, *scopes; + Dwarf_Die vr_die; char buf[32], *ptr; - int ret, nscopes; + int ret = 0; if (!is_c_varname(pf->pvar->var)) { /* Copy raw parameters */ @@ -652,29 +652,16 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) if (pf->tvar->name == NULL) return -ENOMEM; - pr_debug("Searching '%s' variable in context.\n", - pf->pvar->var); + pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); /* Search child die for local variables and parameters. 
*/ - if (die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) - ret = convert_variable(&vr_die, pf); - else { - /* Search upper class */ - nscopes = dwarf_getscopes_die(sc_die, &scopes); - ret = -ENOENT; - while (nscopes-- > 1) { - pr_debug("Searching variables in %s\n", - dwarf_diename(&scopes[nscopes])); - /* We should check this scope, so give dummy address */ - if (die_find_variable_at(&scopes[nscopes], - pf->pvar->var, 0, - &vr_die)) { - ret = convert_variable(&vr_die, pf); - break; - } - } - if (scopes) - free(scopes); + if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { + /* Search again in global variables */ + if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) + ret = -ENOENT; } + if (ret == 0) + ret = convert_variable(&vr_die, pf); + if (ret < 0) pr_warning("Failed to find '%s' in this function.\n", pf->pvar->var); @@ -1242,8 +1229,8 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) struct available_var_finder *af = container_of(pf, struct available_var_finder, pf); struct variable_list *vl; - Dwarf_Die die_mem, *scopes = NULL; - int ret, nscopes; + Dwarf_Die die_mem; + int ret; /* Check number of tevs */ if (af->nvls == af->max_vls) { @@ -1273,12 +1260,7 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) goto out; /* Don't need to search child DIE for externs. */ af->child = false; - nscopes = dwarf_getscopes_die(sc_die, &scopes); - while (nscopes-- > 1) - die_find_child(&scopes[nscopes], collect_variables_cb, - (void *)af, &die_mem); - if (scopes) - free(scopes); + die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem); out: if (strlist__empty(vl->vars)) { -- cgit v0.10.2 From db0d2c6420eeb8fd669bac84d72f1ab828bbaa64 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:03:11 +0900 Subject: perf probe: Search concrete out-of-line instances gcc 4.6 generates a concrete out-of-line instance when there is a function which is implicitly inlined somewhere but also has its own instance. The concrete out-of-line instance means that it has an abstract origin of the function which is referred by not only inlined-subroutines but also a concrete subprogram. Since current dwarf_func_inline_instances() can find only instances of inlined-subroutines, this introduces new die_walk_instances() to find both of subprogram and inlined-subroutines. e.g. without this, Available variables at sched_group_rt_period @ struct task_group* tg perf probe failed to find actual subprogram instance of sched_group_rt_period(). With this, Available variables at sched_group_rt_period @ struct task_group* tg @ struct task_group* tg Now it found the sched_group_rt_period() itself. 
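For illustration only (not part of this patch; count_instance_cb() and
count_instances() are made-up names), a caller of the new
die_walk_instances() helper that counts both the concrete out-of-line
instance and any inlined instances could look like this:

#include "dwarf-aux.h"

/* Called once per instance; returning 0 continues the walk. */
static int count_instance_cb(Dwarf_Die *inst, void *data)
{
	int *count = data;

	(void)inst;	/* only the number of instances matters here */
	(*count)++;
	return 0;
}

/* Returns how many instances of @sp_die exist (0 if none). */
static int count_instances(Dwarf_Die *sp_die)
{
	int ninstances = 0;

	/* die_walk_instances() returns -ENOENT if nothing matched */
	die_walk_instances(sp_die, count_instance_cb, &ninstances);
	return ninstances;
}
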
Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110311.19900.63997.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 425703a..d0f4048 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -453,6 +453,64 @@ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, return die_mem; } +struct __instance_walk_param { + void *addr; + int (*callback)(Dwarf_Die *, void *); + void *data; + int retval; +}; + +static int __die_walk_instances_cb(Dwarf_Die *inst, void *data) +{ + struct __instance_walk_param *iwp = data; + Dwarf_Attribute attr_mem; + Dwarf_Die origin_mem; + Dwarf_Attribute *attr; + Dwarf_Die *origin; + + attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem); + if (attr == NULL) + return DIE_FIND_CB_CONTINUE; + + origin = dwarf_formref_die(attr, &origin_mem); + if (origin == NULL || origin->addr != iwp->addr) + return DIE_FIND_CB_CONTINUE; + + iwp->retval = iwp->callback(inst, iwp->data); + + return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE; +} + +/** + * die_walk_instances - Walk on instances of given DIE + * @or_die: an abstract original DIE + * @callback: a callback function which is called with instance DIE + * @data: user data + * + * Walk on the instances of give @in_die. @in_die must be an inlined function + * declartion. This returns the return value of @callback if it returns + * non-zero value, or -ENOENT if there is no instance. + */ +int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *), + void *data) +{ + Dwarf_Die cu_die; + Dwarf_Die die_mem; + struct __instance_walk_param iwp = { + .addr = or_die->addr, + .callback = callback, + .data = data, + .retval = -ENOENT, + }; + + if (dwarf_diecu(or_die, &cu_die, NULL, NULL) == NULL) + return -ENOENT; + + die_find_child(&cu_die, __die_walk_instances_cb, &iwp, &die_mem); + + return iwp.retval; +} + /* Line walker internal parameters */ struct __line_walk_param { bool recursive; diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 6f46106..6ce1717 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -80,6 +80,10 @@ extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr, extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr, Dwarf_Die *die_mem); +/* Walk on the instances of given DIE */ +extern int die_walk_instances(Dwarf_Die *in_die, + int (*callback)(Dwarf_Die *, void *), void *data); + /* Walker on lines (Note: line number will not be sorted) */ typedef int (* line_walk_callback_t) (const char *fname, int lineno, Dwarf_Addr addr, void *data); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 114542a..555fc38 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -924,42 +924,39 @@ static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) return die_walk_lines(sp_die, probe_point_lazy_walker, pf); } -/* Callback parameter with return value */ -struct dwarf_callback_param { - void *data; - int retval; -}; - static int probe_point_inline_cb(Dwarf_Die *in_die, void *data) { - struct dwarf_callback_param *param = data; - struct probe_finder *pf = param->data; + struct probe_finder *pf = data; struct perf_probe_point *pp = &pf->pev->point; Dwarf_Addr addr; + int ret; if 
(pp->lazy_line) - param->retval = find_probe_point_lazy(in_die, pf); + ret = find_probe_point_lazy(in_die, pf); else { /* Get probe address */ if (dwarf_entrypc(in_die, &addr) != 0) { pr_warning("Failed to get entry address of %s.\n", dwarf_diename(in_die)); - param->retval = -ENOENT; - return DWARF_CB_ABORT; + return -ENOENT; } pf->addr = addr; pf->addr += pp->offset; pr_debug("found inline addr: 0x%jx\n", (uintmax_t)pf->addr); - param->retval = call_probe_finder(in_die, pf); - if (param->retval < 0) - return DWARF_CB_ABORT; + ret = call_probe_finder(in_die, pf); } - return DWARF_CB_OK; + return ret; } +/* Callback parameter with return value for libdw */ +struct dwarf_callback_param { + void *data; + int retval; +}; + /* Search function from function name */ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) { @@ -996,14 +993,10 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data) /* TODO: Check the address in this function */ param->retval = call_probe_finder(sp_die, pf); } - } else { - struct dwarf_callback_param _param = {.data = (void *)pf, - .retval = 0}; + } else /* Inlined function: search instances */ - dwarf_func_inline_instances(sp_die, probe_point_inline_cb, - &_param); - param->retval = _param.retval; - } + param->retval = die_walk_instances(sp_die, + probe_point_inline_cb, (void *)pf); return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */ } @@ -1452,16 +1445,14 @@ static int find_line_range_by_line(Dwarf_Die *sp_die, struct line_finder *lf) static int line_range_inline_cb(Dwarf_Die *in_die, void *data) { - struct dwarf_callback_param *param = data; - - param->retval = find_line_range_by_line(in_die, param->data); + find_line_range_by_line(in_die, data); /* * We have to check all instances of inlined function, because * some execution paths can be optimized out depends on the * function argument of instances */ - return DWARF_CB_OK; + return 0; } /* Search function from function name */ @@ -1489,15 +1480,10 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data) pr_debug("New line range: %d to %d\n", lf->lno_s, lf->lno_e); lr->start = lf->lno_s; lr->end = lf->lno_e; - if (dwarf_func_inline(sp_die)) { - struct dwarf_callback_param _param; - _param.data = (void *)lf; - _param.retval = 0; - dwarf_func_inline_instances(sp_die, - line_range_inline_cb, - &_param); - param->retval = _param.retval; - } else + if (dwarf_func_inline(sp_die)) + param->retval = die_walk_instances(sp_die, + line_range_inline_cb, lf); + else param->retval = find_line_range_by_line(sp_die, lf); return DWARF_CB_ABORT; } -- cgit v0.10.2 From 3f4460a28fb2f73df6c32c3a305797abc01c0f9c Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 11 Aug 2011 20:03:18 +0900 Subject: perf probe: Filter out redundant inline-instances With gcc4.6, some instances of concrete inlined function looks redundant and broken, because it appears inside of a concrete instance and its call_file and call_line are same as the original abstruct's decl_file and decl_line respectively. e.g. [ d1aa] subprogram external (flag) Yes name (strp) "add_timer" decl_file (data1) 2 ;here is original decl_line (data2) 847 ;line and file prototyped (flag) Yes inline (data1) inlined (1) sibling (ref4) [ d1c6] ... 
[ 11d84] subprogram abstract_origin (ref4) [ d1aa] ; concrete instance low_pc (addr) .text+0x000000000000246f high_pc (addr) .text+0x000000000000248b frame_base (block1) [ 0] call_frame_cfa sibling (ref4) [ 11dd9] [ 11d9f] formal_parameter abstract_origin (ref4) [ d1b9] location (data4) location list [ 701b] [ 11da8] inlined_subroutine abstract_origin (ref4) [ d1aa] ; redundant instance low_pc (addr) .text+0x000000000000247e high_pc (addr) .text+0x0000000000002480 call_file (data1) 2 ; call line and file call_line (data2) 847 ; are same as above Those redundant instances leads unwilling results; e.g. find probe points inside of functions even if we specify a function entry as below; $ perf probe -V add_timer Available variables at add_timer @ struct timer_list* timer @ (No matched variables) So, this filters out those redundant instances based on call-site and decl-site information. Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Paul Mackerras Cc: Pekka Enberg Cc: Peter Zijlstra Cc: yrl.pp-manager.tt@hitachi.com Link: http://lkml.kernel.org/r/20110811110317.19900.59525.stgit@fedora15 Signed-off-by: Masami Hiramatsu Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index d0f4048..ee51e9b 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -307,6 +307,17 @@ static int die_get_call_fileno(Dwarf_Die *in_die) return -ENOENT; } +/* Get the declared file index number in CU DIE */ +static int die_get_decl_fileno(Dwarf_Die *pdie) +{ + Dwarf_Sword idx; + + if (die_get_attr_sdata(pdie, DW_AT_decl_file, &idx) == 0) + return (int)idx; + else + return -ENOENT; +} + /** * die_get_call_file - Get callsite file name of inlined function instance * @in_die: a DIE of an inlined function instance @@ -467,6 +478,7 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data) Dwarf_Die origin_mem; Dwarf_Attribute *attr; Dwarf_Die *origin; + int tmp; attr = dwarf_attr(inst, DW_AT_abstract_origin, &attr_mem); if (attr == NULL) @@ -476,6 +488,16 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data) if (origin == NULL || origin->addr != iwp->addr) return DIE_FIND_CB_CONTINUE; + /* Ignore redundant instances */ + if (dwarf_tag(inst) == DW_TAG_inlined_subroutine) { + dwarf_decl_line(origin, &tmp); + if (die_get_call_lineno(inst) == tmp) { + tmp = die_get_decl_fileno(origin); + if (die_get_call_fileno(inst) == tmp) + return DIE_FIND_CB_CONTINUE; + } + } + iwp->retval = iwp->callback(inst, iwp->data); return (iwp->retval) ? DIE_FIND_CB_END : DIE_FIND_CB_CONTINUE; -- cgit v0.10.2 From 9c1176b6a28850703ea6e3a0f0c703f6d6c61cd3 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 11 Aug 2011 00:06:04 +0200 Subject: firewire: cdev: fix 32 bit userland on 64 bit kernel compat corner cases Clemens points out that we need to use compat_ptr() in order to safely cast from u64 to addresses of a 32-bit usermode client. Before, our conversion went wrong - in practice if the client cast from pointer to integer such that sign-extension happened, (libraw1394 and libdc1394 at least were not doing that, IOW were not affected) or - in theory on s390 (which doesn't have FireWire though) and on the tile architecture, regardless of what the client does. The bug would usually be observed as the initial get_info ioctl failing with "Bad address" (EFAULT). 
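For illustration (the address is an example, not from the report), this
is how the old conversion goes wrong for a sign-extending 32-bit client:

	/* the client stores a 32-bit pointer via (long) into the u64 field */
	__u64 field = (__u64)(long)0x80001000;	/* == 0xffffffff80001000 */

	/* old code: keeps the bogus upper bits, accesses fault with EFAULT */
	void __user *bad = (void __user *)(unsigned long)field;

	/* fixed code: compat_ptr() masks down to the 32-bit address (and
	 * also clears bit 31 as required on s390) */
	void __user *good = compat_ptr(field);
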
Reported-by: Carl Karsten Reported-by: Clemens Ladisch Signed-off-by: Stefan Richter diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index e6ad3bb..4799393 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -216,15 +216,33 @@ struct inbound_phy_packet_event { struct fw_cdev_event_phy_packet phy_packet; }; -static inline void __user *u64_to_uptr(__u64 value) +#ifdef CONFIG_COMPAT +static void __user *u64_to_uptr(u64 value) +{ + if (is_compat_task()) + return compat_ptr(value); + else + return (void __user *)(unsigned long)value; +} + +static u64 uptr_to_u64(void __user *ptr) +{ + if (is_compat_task()) + return ptr_to_compat(ptr); + else + return (u64)(unsigned long)ptr; +} +#else +static inline void __user *u64_to_uptr(u64 value) { return (void __user *)(unsigned long)value; } -static inline __u64 uptr_to_u64(void __user *ptr) +static inline u64 uptr_to_u64(void __user *ptr) { - return (__u64)(unsigned long)ptr; + return (u64)(unsigned long)ptr; } +#endif /* CONFIG_COMPAT */ static int fw_device_op_open(struct inode *inode, struct file *file) { -- cgit v0.10.2 From a01e836087881dd9d824417190994c9b2b0f1dbb Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Thu, 11 Aug 2011 20:40:42 +0200 Subject: firewire: ohci: fix DMA unmapping in an error path If request_irq failed, we would pass wrong arguments to dma_free_coherent. https://bugzilla.redhat.com/show_bug.cgi?id=728185 Reported-by: Mads Kiilerich Signed-off-by: Stefan Richter diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 4f6d72f..ded0c9b 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2178,8 +2178,13 @@ static int ohci_enable(struct fw_card *card, ohci_driver_name, ohci)) { fw_error("Failed to allocate interrupt %d.\n", dev->irq); pci_disable_msi(dev); - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - ohci->config_rom, ohci->config_rom_bus); + + if (config_rom) { + dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, + ohci->next_config_rom, + ohci->next_config_rom_bus); + ohci->next_config_rom = NULL; + } return -EIO; } -- cgit v0.10.2 From f6b864a9071e21186476910613ec9913b56067a2 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 12 Aug 2011 18:22:10 +0200 Subject: ASoC: Fix compile warning in wm8750.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sound/soc/codecs/wm8750.c:784:2: warning: missing braces around initializer sound/soc/codecs/wm8750.c:784:2: warning: (near initialization for ‘wm8750_spi_ids[2].name’) It's because struct spi_device_id.name is a char array, not a pointer, while the driver initializes explicitly with 0. Signed-off-by: Takashi Iwai diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c index 82ac5fc..d0003cc 100644 --- a/sound/soc/codecs/wm8750.c +++ b/sound/soc/codecs/wm8750.c @@ -781,7 +781,7 @@ static int __devexit wm8750_spi_remove(struct spi_device *spi) static const struct spi_device_id wm8750_spi_ids[] = { { "wm8750", 0 }, { "wm8987", 0 }, - { 0, 0 }, + { }, }; MODULE_DEVICE_TABLE(spi, wm8750_spi_ids); -- cgit v0.10.2 From f8afdf481f0fef5e170c6c928cec42879d505654 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 12 Aug 2011 13:31:30 -0400 Subject: drivers/net/wireless/wl12xx: add missing kfree In each case, the freed data should be freed in the error handling code as well. 
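The shape of the leak, reduced to a sketch (struct foo_cmd and
do_request() are placeholders, not driver symbols):

#include <linux/slab.h>

struct foo_cmd { int id; };
static int do_request(struct foo_cmd *cmd);	/* placeholder */

static int send_cmd(void)
{
	struct foo_cmd *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	ret = do_request(cmd);
	if (ret < 0) {
		kfree(cmd);	/* without this, the early return leaks cmd */
		return ret;
	}

	kfree(cmd);
	return 0;
}
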
A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @exists@ local idexpression x; statement S,S1; expression E; identifier fl; expression *ptr != NULL; @@ x = \(kmalloc\|kzalloc\|kcalloc\)(...); ... if (x == NULL) S <... when != x when != if (...) { <+...kfree(x)...+> } when any when != true x == NULL x->fl ...> ( if (x == NULL) S1 | if (...) { ... when != x when forall ( return \(0\|<+...x...+>\|ptr\); | * return ...; ) } ) // Signed-off-by: Julia Lawall Acked-by: Luciano Coelho Signed-off-by: John W. Linville diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index 7e33f1f..34f6ab5 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c @@ -77,8 +77,6 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) auth->sleep_auth = sleep_auth; ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth)); - if (ret < 0) - return ret; out: kfree(auth); @@ -624,10 +622,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl) ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, detection, sizeof(*detection)); - if (ret < 0) { + if (ret < 0) wl1271_warning("failed to set cca threshold: %d", ret); - return ret; - } out: kfree(detection); diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c index 5d5e1ef..88add68 100644 --- a/drivers/net/wireless/wl12xx/testmode.c +++ b/drivers/net/wireless/wl12xx/testmode.c @@ -139,12 +139,15 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[]) if (ret < 0) { wl1271_warning("testmode cmd interrogate failed: %d", ret); + kfree(cmd); return ret; } skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd)); - if (!skb) + if (!skb) { + kfree(cmd); return -ENOMEM; + } NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); -- cgit v0.10.2 From ac4d6888b21a8be373f3e06f1d4011fbe2bbbeac Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 3 Aug 2011 02:18:29 +0000 Subject: xfs: Check the return value of xfs_buf_read() for NULL Check the return value of xfs_buf_read() for NULL and return ENOMEM if it is NULL. This is necessary in a few spots to avoid subsequent code blindly dereferencing the null buffer pointer. 
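In short, the pattern each hunk below applies is (a sketch using the
names from the diff):

	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, flags);
	if (!bp)
		return XFS_ERROR(ENOMEM);	/* no buffer was allocated */
	error = xfs_buf_geterror(bp);		/* only safe once bp != NULL */
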
Signed-off-by: Chandra Seetharaman Signed-off-by: Alex Elder diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 1076b7e..b9afff8 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2131,6 +2131,8 @@ xlog_recover_buffer_pass2( bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, buf_flags); + if (!bp) + return XFS_ERROR(ENOMEM); error = xfs_buf_geterror(bp); if (error) { xfs_ioerror_alert("xlog_recover_do..(read#1)", mp, @@ -2222,6 +2224,10 @@ xlog_recover_inode_pass2( bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, XBF_LOCK); + if (!bp) { + error = ENOMEM; + goto error; + } error = xfs_buf_geterror(bp); if (error) { xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index dd05360..2a432d0 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -83,6 +83,8 @@ xfs_readlink_bmap( bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); + if (!bp) + return XFS_ERROR(ENOMEM); error = xfs_buf_geterror(bp); if (error) { xfs_ioerror_alert("xfs_readlink", -- cgit v0.10.2 From e570280521290c27621d60cffea2400bdf1f2c88 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 3 Aug 2011 02:18:34 +0000 Subject: xfs: replace xfs_buf_geterror() with bp->b_error Since we just checked bp for NULL, it is ok to replace xfs_buf_geterror() with bp->b_error in these places. Signed-off-by: Chandra Seetharaman Signed-off-by: Alex Elder diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b9afff8..a199dbc 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2133,7 +2133,7 @@ xlog_recover_buffer_pass2( buf_flags); if (!bp) return XFS_ERROR(ENOMEM); - error = xfs_buf_geterror(bp); + error = bp->b_error; if (error) { xfs_ioerror_alert("xlog_recover_do..(read#1)", mp, bp, buf_f->blf_blkno); @@ -2228,7 +2228,7 @@ xlog_recover_inode_pass2( error = ENOMEM; goto error; } - error = xfs_buf_geterror(bp); + error = bp->b_error; if (error) { xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, bp, in_f->ilf_blkno); diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 2a432d0..51fc429 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -85,7 +85,7 @@ xfs_readlink_bmap( XBF_LOCK | XBF_MAPPED | XBF_DONT_BLOCK); if (!bp) return XFS_ERROR(ENOMEM); - error = xfs_buf_geterror(bp); + error = bp->b_error; if (error) { xfs_ioerror_alert("xfs_readlink", ip->i_mount, bp, XFS_BUF_ADDR(bp)); -- cgit v0.10.2 From 06f8e2d6754dc631732415b741b5aa58a0f7133f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 12 Aug 2011 13:57:55 -0500 Subject: xfs: don't expect xfs headers to be in subdirectories Fix up some #include directives in preparation for moving a few header files out of xfs source subdirectories. Note that "xfs_linux.h" also got its quoting convention for included files switched. 
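For reference (general C preprocessor behaviour, not specific to this
patch): a quoted include searches the directory of the including file
before the -I paths, while an angle-bracket include does not:

#include "xfs_types.h"	/* current file's directory first, then -I paths */
#include <xfs_types.h>	/* -I (and system) paths only */
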
Signed-off-by: Alex Elder Reviewed-by: Christoph Hellwig diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 75bb316..b100cf4 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -16,7 +16,7 @@ # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # -ccflags-y := -I$(src) -I$(src)/linux-2.6 +ccflags-y := -I$(src) -I$(src)/linux-2.6 -I$(src)/quota -I$(src)/support ccflags-$(CONFIG_XFS_DEBUG) += -g XFS_LINUX := linux-2.6 diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index d42f814..1e8a45e 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -32,13 +32,12 @@ # define XFS_BIG_INUMS 0 #endif -#include +#include "xfs_types.h" -#include -#include -#include - -#include +#include "kmem.h" +#include "mrlock.h" +#include "time.h" +#include "uuid.h" #include #include @@ -78,14 +77,14 @@ #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include "xfs_vnode.h" +#include "xfs_stats.h" +#include "xfs_sysctl.h" +#include "xfs_iops.h" +#include "xfs_aops.h" +#include "xfs_super.h" +#include "xfs_buf.h" +#include "xfs_message.h" #ifdef __BIG_ENDIAN #define XFS_NATIVE_HOST 1 diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c index 29b9d64..7e76f53 100644 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ b/fs/xfs/linux-2.6/xfs_quotaops.c @@ -25,7 +25,7 @@ #include "xfs_trans.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" -#include "quota/xfs_qm.h" +#include "xfs_qm.h" #include diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c index 88d25d4..9010ce8 100644 --- a/fs/xfs/linux-2.6/xfs_trace.c +++ b/fs/xfs/linux-2.6/xfs_trace.c @@ -43,8 +43,8 @@ #include "xfs_quota.h" #include "xfs_iomap.h" #include "xfs_aops.h" -#include "quota/xfs_dquot_item.h" -#include "quota/xfs_dquot.h" +#include "xfs_dquot_item.h" +#include "xfs_dquot.h" #include "xfs_log_recover.h" #include "xfs_inode_item.h" diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 53ec3ea..d8b11b7 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h @@ -24,5 +24,6 @@ #define XFS_BUF_LOCK_TRACKING 1 #endif -#include +#include "xfs_linux.h" + #endif /* __XFS_H__ */ -- cgit v0.10.2 From c59d87c460767bc35dafd490139d3cfe78fb8da4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 12 Aug 2011 16:21:35 -0500 Subject: xfs: remove subdirectories Use the move from Linux 2.6 to Linux 3.x as an excuse to kill the annoying subdirectories in the XFS source code. Besides the large amount of file rename the only changes are to the Makefile, a few files including headers with the subdirectory prefix, and the binary sysctl compat code that includes a header under fs/xfs/ from kernel/. 
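As a usage note for one of the files being moved (the sizes below are
made up; see kmem.c in the diff for the implementation): the greedy
allocator halves its request until an allocation succeeds and reports
the size it settled on:

	size_t size;
	void *buf = kmem_zalloc_greedy(&size, PAGE_SIZE, 16 * PAGE_SIZE);

	if (buf) {
		/* size now holds the amount actually allocated;
		 * use buf[0..size-1] here */
		kmem_free_large(buf);	/* may be vmalloc()ed memory */
	}
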
Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index b100cf4..ffce328 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -16,44 +16,51 @@ # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # -ccflags-y := -I$(src) -I$(src)/linux-2.6 -I$(src)/quota -I$(src)/support ccflags-$(CONFIG_XFS_DEBUG) += -g -XFS_LINUX := linux-2.6 - obj-$(CONFIG_XFS_FS) += xfs.o -xfs-y += linux-2.6/xfs_trace.o - -xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \ - xfs_dquot.o \ - xfs_dquot_item.o \ - xfs_trans_dquot.o \ - xfs_qm_syscalls.o \ - xfs_qm_bhv.o \ - xfs_qm.o) -xfs-$(CONFIG_XFS_QUOTA) += linux-2.6/xfs_quotaops.o - -ifeq ($(CONFIG_XFS_QUOTA),y) -xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o -endif - -xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o -xfs-$(CONFIG_XFS_POSIX_ACL) += $(XFS_LINUX)/xfs_acl.o -xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o -xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o -xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o +# this one should be compiled first, as the tracing macros can easily blow up +xfs-y += xfs_trace.o +# highlevel code +xfs-y += xfs_aops.o \ + xfs_bit.o \ + xfs_buf.o \ + xfs_dfrag.o \ + xfs_discard.o \ + xfs_error.o \ + xfs_export.o \ + xfs_file.o \ + xfs_filestream.o \ + xfs_fsops.o \ + xfs_fs_subr.o \ + xfs_globals.o \ + xfs_iget.o \ + xfs_ioctl.o \ + xfs_iomap.o \ + xfs_iops.o \ + xfs_itable.o \ + xfs_message.o \ + xfs_mru_cache.o \ + xfs_super.o \ + xfs_sync.o \ + xfs_xattr.o \ + xfs_rename.o \ + xfs_rw.o \ + xfs_utils.o \ + xfs_vnodeops.o \ + kmem.o \ + uuid.o +# code shared with libxfs xfs-y += xfs_alloc.o \ xfs_alloc_btree.o \ xfs_attr.o \ xfs_attr_leaf.o \ - xfs_bit.o \ xfs_bmap.o \ xfs_bmap_btree.o \ xfs_btree.o \ - xfs_buf_item.o \ xfs_da_btree.o \ xfs_dir2.o \ xfs_dir2_block.o \ @@ -61,49 +68,37 @@ xfs-y += xfs_alloc.o \ xfs_dir2_leaf.o \ xfs_dir2_node.o \ xfs_dir2_sf.o \ - xfs_error.o \ - xfs_extfree_item.o \ - xfs_filestream.o \ - xfs_fsops.o \ xfs_ialloc.o \ xfs_ialloc_btree.o \ - xfs_iget.o \ xfs_inode.o \ - xfs_inode_item.o \ - xfs_iomap.o \ - xfs_itable.o \ - xfs_dfrag.o \ - xfs_log.o \ - xfs_log_cil.o \ xfs_log_recover.o \ xfs_mount.o \ - xfs_mru_cache.o \ - xfs_rename.o \ - xfs_trans.o \ + xfs_trans.o + +# low-level transaction/log code +xfs-y += xfs_log.o \ + xfs_log_cil.o \ + xfs_buf_item.o \ + xfs_extfree_item.o \ + xfs_inode_item.o \ xfs_trans_ail.o \ xfs_trans_buf.o \ xfs_trans_extfree.o \ xfs_trans_inode.o \ - xfs_utils.o \ - xfs_vnodeops.o \ - xfs_rw.o - -# Objects in linux/ -xfs-y += $(addprefix $(XFS_LINUX)/, \ - kmem.o \ - xfs_aops.o \ - xfs_buf.o \ - xfs_discard.o \ - xfs_export.o \ - xfs_file.o \ - xfs_fs_subr.o \ - xfs_globals.o \ - xfs_ioctl.o \ - xfs_iops.o \ - xfs_message.o \ - xfs_super.o \ - xfs_sync.o \ - xfs_xattr.o) -# Objects in support/ -xfs-y += support/uuid.o +# optional features +xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \ + xfs_dquot_item.o \ + xfs_trans_dquot.o \ + xfs_qm_syscalls.o \ + xfs_qm_bhv.o \ + xfs_qm.o \ + xfs_quotaops.o +ifeq ($(CONFIG_XFS_QUOTA),y) +xfs-$(CONFIG_PROC_FS) += xfs_qm_stats.o +endif +xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o +xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o +xfs-$(CONFIG_PROC_FS) += xfs_stats.o +xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o +xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c new file mode 100644 index 0000000..a907de5 --- /dev/null +++ b/fs/xfs/kmem.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include +#include +#include +#include +#include +#include +#include "time.h" +#include "kmem.h" +#include "xfs_message.h" + +/* + * Greedy allocation. May fail and may return vmalloced memory. + * + * Must be freed using kmem_free_large. + */ +void * +kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize) +{ + void *ptr; + size_t kmsize = maxsize; + + while (!(ptr = kmem_zalloc_large(kmsize))) { + if ((kmsize >>= 1) <= minsize) + kmsize = minsize; + } + if (ptr) + *size = kmsize; + return ptr; +} + +void * +kmem_alloc(size_t size, unsigned int __nocast flags) +{ + int retries = 0; + gfp_t lflags = kmem_flags_convert(flags); + void *ptr; + + do { + ptr = kmalloc(size, lflags); + if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) + return ptr; + if (!(++retries % 100)) + xfs_err(NULL, + "possible memory allocation deadlock in %s (mode:0x%x)", + __func__, lflags); + congestion_wait(BLK_RW_ASYNC, HZ/50); + } while (1); +} + +void * +kmem_zalloc(size_t size, unsigned int __nocast flags) +{ + void *ptr; + + ptr = kmem_alloc(size, flags); + if (ptr) + memset((char *)ptr, 0, (int)size); + return ptr; +} + +void +kmem_free(const void *ptr) +{ + if (!is_vmalloc_addr(ptr)) { + kfree(ptr); + } else { + vfree(ptr); + } +} + +void * +kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, + unsigned int __nocast flags) +{ + void *new; + + new = kmem_alloc(newsize, flags); + if (ptr) { + if (new) + memcpy(new, ptr, + ((oldsize < newsize) ? oldsize : newsize)); + kmem_free(ptr); + } + return new; +} + +void * +kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) +{ + int retries = 0; + gfp_t lflags = kmem_flags_convert(flags); + void *ptr; + + do { + ptr = kmem_cache_alloc(zone, lflags); + if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) + return ptr; + if (!(++retries % 100)) + xfs_err(NULL, + "possible memory allocation deadlock in %s (mode:0x%x)", + __func__, lflags); + congestion_wait(BLK_RW_ASYNC, HZ/50); + } while (1); +} + +void * +kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) +{ + void *ptr; + + ptr = kmem_zone_alloc(zone, flags); + if (ptr) + memset((char *)ptr, 0, kmem_cache_size(zone)); + return ptr; +} diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h new file mode 100644 index 0000000..f7c8f7a --- /dev/null +++ b/fs/xfs/kmem.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_SUPPORT_KMEM_H__ +#define __XFS_SUPPORT_KMEM_H__ + +#include +#include +#include +#include + +/* + * General memory allocation interfaces + */ + +#define KM_SLEEP 0x0001u +#define KM_NOSLEEP 0x0002u +#define KM_NOFS 0x0004u +#define KM_MAYFAIL 0x0008u + +/* + * We use a special process flag to avoid recursive callbacks into + * the filesystem during transactions. We will also issue our own + * warnings, so we explicitly skip any generic ones (silly of us). + */ +static inline gfp_t +kmem_flags_convert(unsigned int __nocast flags) +{ + gfp_t lflags; + + BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); + + if (flags & KM_NOSLEEP) { + lflags = GFP_ATOMIC | __GFP_NOWARN; + } else { + lflags = GFP_KERNEL | __GFP_NOWARN; + if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) + lflags &= ~__GFP_FS; + } + return lflags; +} + +extern void *kmem_alloc(size_t, unsigned int __nocast); +extern void *kmem_zalloc(size_t, unsigned int __nocast); +extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast); +extern void kmem_free(const void *); + +static inline void *kmem_zalloc_large(size_t size) +{ + void *ptr; + + ptr = vmalloc(size); + if (ptr) + memset(ptr, 0, size); + return ptr; +} +static inline void kmem_free_large(void *ptr) +{ + vfree(ptr); +} + +extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); + +/* + * Zone interfaces + */ + +#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN +#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT +#define KM_ZONE_SPREAD SLAB_MEM_SPREAD + +#define kmem_zone kmem_cache +#define kmem_zone_t struct kmem_cache + +static inline kmem_zone_t * +kmem_zone_init(int size, char *zone_name) +{ + return kmem_cache_create(zone_name, size, 0, 0, NULL); +} + +static inline kmem_zone_t * +kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, + void (*construct)(void *)) +{ + return kmem_cache_create(zone_name, size, 0, flags, construct); +} + +static inline void +kmem_zone_free(kmem_zone_t *zone, void *ptr) +{ + kmem_cache_free(zone, ptr); +} + +static inline void +kmem_zone_destroy(kmem_zone_t *zone) +{ + if (zone) + kmem_cache_destroy(zone); +} + +extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); +extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); + +static inline int +kmem_shake_allow(gfp_t gfp_mask) +{ + return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); +} + +#endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c deleted file mode 100644 index a907de5..0000000 --- a/fs/xfs/linux-2.6/kmem.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include -#include -#include -#include -#include -#include -#include "time.h" -#include "kmem.h" -#include "xfs_message.h" - -/* - * Greedy allocation. May fail and may return vmalloced memory. - * - * Must be freed using kmem_free_large. - */ -void * -kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize) -{ - void *ptr; - size_t kmsize = maxsize; - - while (!(ptr = kmem_zalloc_large(kmsize))) { - if ((kmsize >>= 1) <= minsize) - kmsize = minsize; - } - if (ptr) - *size = kmsize; - return ptr; -} - -void * -kmem_alloc(size_t size, unsigned int __nocast flags) -{ - int retries = 0; - gfp_t lflags = kmem_flags_convert(flags); - void *ptr; - - do { - ptr = kmalloc(size, lflags); - if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) - return ptr; - if (!(++retries % 100)) - xfs_err(NULL, - "possible memory allocation deadlock in %s (mode:0x%x)", - __func__, lflags); - congestion_wait(BLK_RW_ASYNC, HZ/50); - } while (1); -} - -void * -kmem_zalloc(size_t size, unsigned int __nocast flags) -{ - void *ptr; - - ptr = kmem_alloc(size, flags); - if (ptr) - memset((char *)ptr, 0, (int)size); - return ptr; -} - -void -kmem_free(const void *ptr) -{ - if (!is_vmalloc_addr(ptr)) { - kfree(ptr); - } else { - vfree(ptr); - } -} - -void * -kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, - unsigned int __nocast flags) -{ - void *new; - - new = kmem_alloc(newsize, flags); - if (ptr) { - if (new) - memcpy(new, ptr, - ((oldsize < newsize) ? oldsize : newsize)); - kmem_free(ptr); - } - return new; -} - -void * -kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) -{ - int retries = 0; - gfp_t lflags = kmem_flags_convert(flags); - void *ptr; - - do { - ptr = kmem_cache_alloc(zone, lflags); - if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) - return ptr; - if (!(++retries % 100)) - xfs_err(NULL, - "possible memory allocation deadlock in %s (mode:0x%x)", - __func__, lflags); - congestion_wait(BLK_RW_ASYNC, HZ/50); - } while (1); -} - -void * -kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) -{ - void *ptr; - - ptr = kmem_zone_alloc(zone, flags); - if (ptr) - memset((char *)ptr, 0, kmem_cache_size(zone)); - return ptr; -} diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h deleted file mode 100644 index f7c8f7a..0000000 --- a/fs/xfs/linux-2.6/kmem.h +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SUPPORT_KMEM_H__ -#define __XFS_SUPPORT_KMEM_H__ - -#include -#include -#include -#include - -/* - * General memory allocation interfaces - */ - -#define KM_SLEEP 0x0001u -#define KM_NOSLEEP 0x0002u -#define KM_NOFS 0x0004u -#define KM_MAYFAIL 0x0008u - -/* - * We use a special process flag to avoid recursive callbacks into - * the filesystem during transactions. We will also issue our own - * warnings, so we explicitly skip any generic ones (silly of us). - */ -static inline gfp_t -kmem_flags_convert(unsigned int __nocast flags) -{ - gfp_t lflags; - - BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); - - if (flags & KM_NOSLEEP) { - lflags = GFP_ATOMIC | __GFP_NOWARN; - } else { - lflags = GFP_KERNEL | __GFP_NOWARN; - if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) - lflags &= ~__GFP_FS; - } - return lflags; -} - -extern void *kmem_alloc(size_t, unsigned int __nocast); -extern void *kmem_zalloc(size_t, unsigned int __nocast); -extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast); -extern void kmem_free(const void *); - -static inline void *kmem_zalloc_large(size_t size) -{ - void *ptr; - - ptr = vmalloc(size); - if (ptr) - memset(ptr, 0, size); - return ptr; -} -static inline void kmem_free_large(void *ptr) -{ - vfree(ptr); -} - -extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); - -/* - * Zone interfaces - */ - -#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN -#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT -#define KM_ZONE_SPREAD SLAB_MEM_SPREAD - -#define kmem_zone kmem_cache -#define kmem_zone_t struct kmem_cache - -static inline kmem_zone_t * -kmem_zone_init(int size, char *zone_name) -{ - return kmem_cache_create(zone_name, size, 0, 0, NULL); -} - -static inline kmem_zone_t * -kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, - void (*construct)(void *)) -{ - return kmem_cache_create(zone_name, size, 0, flags, construct); -} - -static inline void -kmem_zone_free(kmem_zone_t *zone, void *ptr) -{ - kmem_cache_free(zone, ptr); -} - -static inline void -kmem_zone_destroy(kmem_zone_t *zone) -{ - if (zone) - kmem_cache_destroy(zone); -} - -extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); -extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); - -static inline int -kmem_shake_allow(gfp_t gfp_mask) -{ - return ((gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)); -} - -#endif /* __XFS_SUPPORT_KMEM_H__ */ diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h deleted file mode 100644 index ff6a198..0000000 --- a/fs/xfs/linux-2.6/mrlock.h +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright (c) 2000-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SUPPORT_MRLOCK_H__ -#define __XFS_SUPPORT_MRLOCK_H__ - -#include - -typedef struct { - struct rw_semaphore mr_lock; -#ifdef DEBUG - int mr_writer; -#endif -} mrlock_t; - -#ifdef DEBUG -#define mrinit(mrp, name) \ - do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0) -#else -#define mrinit(mrp, name) \ - do { init_rwsem(&(mrp)->mr_lock); } while (0) -#endif - -#define mrlock_init(mrp, t,n,s) mrinit(mrp, n) -#define mrfree(mrp) do { } while (0) - -static inline void mraccess_nested(mrlock_t *mrp, int subclass) -{ - down_read_nested(&mrp->mr_lock, subclass); -} - -static inline void mrupdate_nested(mrlock_t *mrp, int subclass) -{ - down_write_nested(&mrp->mr_lock, subclass); -#ifdef DEBUG - mrp->mr_writer = 1; -#endif -} - -static inline int mrtryaccess(mrlock_t *mrp) -{ - return down_read_trylock(&mrp->mr_lock); -} - -static inline int mrtryupdate(mrlock_t *mrp) -{ - if (!down_write_trylock(&mrp->mr_lock)) - return 0; -#ifdef DEBUG - mrp->mr_writer = 1; -#endif - return 1; -} - -static inline void mrunlock_excl(mrlock_t *mrp) -{ -#ifdef DEBUG - mrp->mr_writer = 0; -#endif - up_write(&mrp->mr_lock); -} - -static inline void mrunlock_shared(mrlock_t *mrp) -{ - up_read(&mrp->mr_lock); -} - -static inline void mrdemote(mrlock_t *mrp) -{ -#ifdef DEBUG - mrp->mr_writer = 0; -#endif - downgrade_write(&mrp->mr_lock); -} - -#endif /* __XFS_SUPPORT_MRLOCK_H__ */ diff --git a/fs/xfs/linux-2.6/time.h b/fs/xfs/linux-2.6/time.h deleted file mode 100644 index 387e695..0000000 --- a/fs/xfs/linux-2.6/time.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SUPPORT_TIME_H__ -#define __XFS_SUPPORT_TIME_H__ - -#include -#include - -typedef struct timespec timespec_t; - -static inline void delay(long ticks) -{ - schedule_timeout_uninterruptible(ticks); -} - -static inline void nanotime(struct timespec *tvp) -{ - *tvp = CURRENT_TIME; -} - -#endif /* __XFS_SUPPORT_TIME_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c deleted file mode 100644 index b6c4b37..0000000 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Copyright (c) 2008, Christoph Hellwig - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_acl.h" -#include "xfs_attr.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_vnodeops.h" -#include "xfs_trace.h" -#include -#include -#include - - -/* - * Locking scheme: - * - all ACL updates are protected by inode->i_mutex, which is taken before - * calling into this file. - */ - -STATIC struct posix_acl * -xfs_acl_from_disk(struct xfs_acl *aclp) -{ - struct posix_acl_entry *acl_e; - struct posix_acl *acl; - struct xfs_acl_entry *ace; - int count, i; - - count = be32_to_cpu(aclp->acl_cnt); - - acl = posix_acl_alloc(count, GFP_KERNEL); - if (!acl) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < count; i++) { - acl_e = &acl->a_entries[i]; - ace = &aclp->acl_entry[i]; - - /* - * The tag is 32 bits on disk and 16 bits in core. - * - * Because every access to it goes through the core - * format first this is not a problem. - */ - acl_e->e_tag = be32_to_cpu(ace->ae_tag); - acl_e->e_perm = be16_to_cpu(ace->ae_perm); - - switch (acl_e->e_tag) { - case ACL_USER: - case ACL_GROUP: - acl_e->e_id = be32_to_cpu(ace->ae_id); - break; - case ACL_USER_OBJ: - case ACL_GROUP_OBJ: - case ACL_MASK: - case ACL_OTHER: - acl_e->e_id = ACL_UNDEFINED_ID; - break; - default: - goto fail; - } - } - return acl; - -fail: - posix_acl_release(acl); - return ERR_PTR(-EINVAL); -} - -STATIC void -xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl) -{ - const struct posix_acl_entry *acl_e; - struct xfs_acl_entry *ace; - int i; - - aclp->acl_cnt = cpu_to_be32(acl->a_count); - for (i = 0; i < acl->a_count; i++) { - ace = &aclp->acl_entry[i]; - acl_e = &acl->a_entries[i]; - - ace->ae_tag = cpu_to_be32(acl_e->e_tag); - ace->ae_id = cpu_to_be32(acl_e->e_id); - ace->ae_perm = cpu_to_be16(acl_e->e_perm); - } -} - -struct posix_acl * -xfs_get_acl(struct inode *inode, int type) -{ - struct xfs_inode *ip = XFS_I(inode); - struct posix_acl *acl; - struct xfs_acl *xfs_acl; - int len = sizeof(struct xfs_acl); - unsigned char *ea_name; - int error; - - acl = get_cached_acl(inode, type); - if (acl != ACL_NOT_CACHED) - return acl; - - trace_xfs_get_acl(ip); - - switch (type) { - case ACL_TYPE_ACCESS: - ea_name = SGI_ACL_FILE; - break; - case ACL_TYPE_DEFAULT: - ea_name = SGI_ACL_DEFAULT; - break; - default: - BUG(); - } - - /* - * If we have a cached ACLs value just return it, not need to - * go out to the disk. - */ - - xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); - if (!xfs_acl) - return ERR_PTR(-ENOMEM); - - error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, - &len, ATTR_ROOT); - if (error) { - /* - * If the attribute doesn't exist make sure we have a negative - * cache entry, for any other error assume it is transient and - * leave the cache entry as ACL_NOT_CACHED. 
- */ - if (error == -ENOATTR) { - acl = NULL; - goto out_update_cache; - } - goto out; - } - - acl = xfs_acl_from_disk(xfs_acl); - if (IS_ERR(acl)) - goto out; - - out_update_cache: - set_cached_acl(inode, type, acl); - out: - kfree(xfs_acl); - return acl; -} - -STATIC int -xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) -{ - struct xfs_inode *ip = XFS_I(inode); - unsigned char *ea_name; - int error; - - if (S_ISLNK(inode->i_mode)) - return -EOPNOTSUPP; - - switch (type) { - case ACL_TYPE_ACCESS: - ea_name = SGI_ACL_FILE; - break; - case ACL_TYPE_DEFAULT: - if (!S_ISDIR(inode->i_mode)) - return acl ? -EACCES : 0; - ea_name = SGI_ACL_DEFAULT; - break; - default: - return -EINVAL; - } - - if (acl) { - struct xfs_acl *xfs_acl; - int len; - - xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); - if (!xfs_acl) - return -ENOMEM; - - xfs_acl_to_disk(xfs_acl, acl); - len = sizeof(struct xfs_acl) - - (sizeof(struct xfs_acl_entry) * - (XFS_ACL_MAX_ENTRIES - acl->a_count)); - - error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, - len, ATTR_ROOT); - - kfree(xfs_acl); - } else { - /* - * A NULL ACL argument means we want to remove the ACL. - */ - error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); - - /* - * If the attribute didn't exist to start with that's fine. - */ - if (error == -ENOATTR) - error = 0; - } - - if (!error) - set_cached_acl(inode, type, acl); - return error; -} - -static int -xfs_set_mode(struct inode *inode, umode_t mode) -{ - int error = 0; - - if (mode != inode->i_mode) { - struct iattr iattr; - - iattr.ia_valid = ATTR_MODE | ATTR_CTIME; - iattr.ia_mode = mode; - iattr.ia_ctime = current_fs_time(inode->i_sb); - - error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); - } - - return error; -} - -static int -xfs_acl_exists(struct inode *inode, unsigned char *name) -{ - int len = sizeof(struct xfs_acl); - - return (xfs_attr_get(XFS_I(inode), name, NULL, &len, - ATTR_ROOT|ATTR_KERNOVAL) == 0); -} - -int -posix_acl_access_exists(struct inode *inode) -{ - return xfs_acl_exists(inode, SGI_ACL_FILE); -} - -int -posix_acl_default_exists(struct inode *inode) -{ - if (!S_ISDIR(inode->i_mode)) - return 0; - return xfs_acl_exists(inode, SGI_ACL_DEFAULT); -} - -/* - * No need for i_mutex because the inode is not yet exposed to the VFS. - */ -int -xfs_inherit_acl(struct inode *inode, struct posix_acl *acl) -{ - umode_t mode = inode->i_mode; - int error = 0, inherit = 0; - - if (S_ISDIR(inode->i_mode)) { - error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, acl); - if (error) - goto out; - } - - error = posix_acl_create(&acl, GFP_KERNEL, &mode); - if (error < 0) - return error; - - /* - * If posix_acl_create returns a positive value we need to - * inherit a permission that can't be represented using the Unix - * mode bits and we actually need to set an ACL. 
- */ - if (error > 0) - inherit = 1; - - error = xfs_set_mode(inode, mode); - if (error) - goto out; - - if (inherit) - error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); - -out: - posix_acl_release(acl); - return error; -} - -int -xfs_acl_chmod(struct inode *inode) -{ - struct posix_acl *acl; - int error; - - if (S_ISLNK(inode->i_mode)) - return -EOPNOTSUPP; - - acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); - if (IS_ERR(acl) || !acl) - return PTR_ERR(acl); - - error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); - if (error) - return error; - - error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); - posix_acl_release(acl); - return error; -} - -static int -xfs_xattr_acl_get(struct dentry *dentry, const char *name, - void *value, size_t size, int type) -{ - struct posix_acl *acl; - int error; - - acl = xfs_get_acl(dentry->d_inode, type); - if (IS_ERR(acl)) - return PTR_ERR(acl); - if (acl == NULL) - return -ENODATA; - - error = posix_acl_to_xattr(acl, value, size); - posix_acl_release(acl); - - return error; -} - -static int -xfs_xattr_acl_set(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags, int type) -{ - struct inode *inode = dentry->d_inode; - struct posix_acl *acl = NULL; - int error = 0; - - if (flags & XATTR_CREATE) - return -EINVAL; - if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) - return value ? -EACCES : 0; - if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER)) - return -EPERM; - - if (!value) - goto set_acl; - - acl = posix_acl_from_xattr(value, size); - if (!acl) { - /* - * acl_set_file(3) may request that we set default ACLs with - * zero length -- defend (gracefully) against that here. - */ - goto out; - } - if (IS_ERR(acl)) { - error = PTR_ERR(acl); - goto out; - } - - error = posix_acl_valid(acl); - if (error) - goto out_release; - - error = -EINVAL; - if (acl->a_count > XFS_ACL_MAX_ENTRIES) - goto out_release; - - if (type == ACL_TYPE_ACCESS) { - umode_t mode = inode->i_mode; - error = posix_acl_equiv_mode(acl, &mode); - - if (error <= 0) { - posix_acl_release(acl); - acl = NULL; - - if (error < 0) - return error; - } - - error = xfs_set_mode(inode, mode); - if (error) - goto out_release; - } - - set_acl: - error = xfs_set_acl(inode, type, acl); - out_release: - posix_acl_release(acl); - out: - return error; -} - -const struct xattr_handler xfs_xattr_acl_access_handler = { - .prefix = POSIX_ACL_XATTR_ACCESS, - .flags = ACL_TYPE_ACCESS, - .get = xfs_xattr_acl_get, - .set = xfs_xattr_acl_set, -}; - -const struct xattr_handler xfs_xattr_acl_default_handler = { - .prefix = POSIX_ACL_XATTR_DEFAULT, - .flags = ACL_TYPE_DEFAULT, - .get = xfs_xattr_acl_get, - .set = xfs_xattr_acl_set, -}; diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c deleted file mode 100644 index 63e971e..0000000 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ /dev/null @@ -1,1499 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include "xfs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
-#include "xfs_trans.h"
-#include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
-#include "xfs_alloc.h"
-#include "xfs_error.h"
-#include "xfs_rw.h"
-#include "xfs_iomap.h"
-#include "xfs_vnodeops.h"
-#include "xfs_trace.h"
-#include "xfs_bmap.h"
-#include <linux/gfp.h>
-#include <linux/mpage.h>
-#include <linux/pagevec.h>
-#include <linux/writeback.h>
-
-
-/*
- * Prime number of hash buckets since address is used as the key.
- */
-#define NVSYNC		37
-#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
-static wait_queue_head_t xfs_ioend_wq[NVSYNC];
-
-void __init
-xfs_ioend_init(void)
-{
-	int i;
-
-	for (i = 0; i < NVSYNC; i++)
-		init_waitqueue_head(&xfs_ioend_wq[i]);
-}
-
-void
-xfs_ioend_wait(
-	xfs_inode_t	*ip)
-{
-	wait_queue_head_t *wq = to_ioend_wq(ip);
-
-	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
-}
-
-STATIC void
-xfs_ioend_wake(
-	xfs_inode_t	*ip)
-{
-	if (atomic_dec_and_test(&ip->i_iocount))
-		wake_up(to_ioend_wq(ip));
-}
-
-void
-xfs_count_page_state(
-	struct page		*page,
-	int			*delalloc,
-	int			*unwritten)
-{
-	struct buffer_head	*bh, *head;
-
-	*delalloc = *unwritten = 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (buffer_unwritten(bh))
-			(*unwritten) = 1;
-		else if (buffer_delay(bh))
-			(*delalloc) = 1;
-	} while ((bh = bh->b_this_page) != head);
-}
-
-STATIC struct block_device *
-xfs_find_bdev_for_inode(
-	struct inode		*inode)
-{
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-
-	if (XFS_IS_REALTIME_INODE(ip))
-		return mp->m_rtdev_targp->bt_bdev;
-	else
-		return mp->m_ddev_targp->bt_bdev;
-}
-
-/*
- * We're now finished for good with this ioend structure.
- * Update the page state via the associated buffer_heads,
- * release holds on the inode and bio, and finally free
- * up memory.  Do not use the ioend after this.
- */
-STATIC void
-xfs_destroy_ioend(
-	xfs_ioend_t		*ioend)
-{
-	struct buffer_head	*bh, *next;
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-
-	for (bh = ioend->io_buffer_head; bh; bh = next) {
-		next = bh->b_private;
-		bh->b_end_io(bh, !ioend->io_error);
-	}
-
-	/*
-	 * Volume managers supporting multiple paths can send back ENODEV
-	 * when the final path disappears.  In this case continuing to fill
-	 * the page cache with dirty data which cannot be written out is
-	 * evil, so prevent that.
-	 */
-	if (unlikely(ioend->io_error == -ENODEV)) {
-		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
-				      __FILE__, __LINE__);
-	}
-
-	xfs_ioend_wake(ip);
-	mempool_free(ioend, xfs_ioend_pool);
-}
-
-/*
- * If the end of the current ioend is beyond the current EOF,
- * return the new EOF value, otherwise zero.
- */
-STATIC xfs_fsize_t
-xfs_ioend_new_eof(
-	xfs_ioend_t		*ioend)
-{
-	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
-	xfs_fsize_t		isize;
-	xfs_fsize_t		bsize;
-
-	bsize = ioend->io_offset + ioend->io_size;
-	isize = MAX(ip->i_size, ip->i_new_size);
-	isize = MIN(isize, bsize);
-	return isize > ip->i_d.di_size ? isize : 0;
-}
-
-/*
- * Update on-disk file size now that data has been written to disk.  The
- * current in-memory file size is i_size.  If a write is beyond eof i_new_size
- * will be the intended file size until i_size is updated.
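to_ioend_wq() above hashes the inode address into a small prime-sized table of wait queues instead of embedding one wait queue per inode. A self-contained sketch of that pointer-keyed bucketing (names invented):

#include <stdio.h>

#define NBUCKETS 37	/* prime, so raw pointer values spread evenly */

struct waitq { int dummy; };
static struct waitq wq_table[NBUCKETS];

/* Same idea as to_ioend_wq(): hash the object's address to pick a bucket. */
static struct waitq *to_wq(const void *obj)
{
	return &wq_table[(unsigned long)obj % NBUCKETS];
}

int main(void)
{
	int a, b;

	printf("a -> bucket %ld, b -> bucket %ld\n",
	       (long)(to_wq(&a) - wq_table), (long)(to_wq(&b) - wq_table));
	return 0;
}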
If this write does
- * not extend all the way to the valid file size then restrict this update to
- * the end of the write.
- *
- * This function does not block as blocking on the inode lock in IO completion
- * can lead to IO completion order dependency deadlocks. If it can't get the
- * inode ilock it will return EAGAIN. Callers must handle this.
- */
-STATIC int
-xfs_setfilesize(
-	xfs_ioend_t		*ioend)
-{
-	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
-	xfs_fsize_t		isize;
-
-	if (unlikely(ioend->io_error))
-		return 0;
-
-	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
-		return EAGAIN;
-
-	isize = xfs_ioend_new_eof(ioend);
-	if (isize) {
-		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
-		ip->i_d.di_size = isize;
-		xfs_mark_inode_dirty(ip);
-	}
-
-	xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	return 0;
-}
-
-/*
- * Schedule IO completion handling on the final put of an ioend.
- */
-STATIC void
-xfs_finish_ioend(
-	struct xfs_ioend	*ioend)
-{
-	if (atomic_dec_and_test(&ioend->io_remaining)) {
-		if (ioend->io_type == IO_UNWRITTEN)
-			queue_work(xfsconvertd_workqueue, &ioend->io_work);
-		else
-			queue_work(xfsdatad_workqueue, &ioend->io_work);
-	}
-}
-
-/*
- * IO write completion.
- */
-STATIC void
-xfs_end_io(
-	struct work_struct *work)
-{
-	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
-	struct xfs_inode *ip = XFS_I(ioend->io_inode);
-	int		error = 0;
-
-	/*
-	 * For unwritten extents we need to issue transactions to convert a
-	 * range to normal written extents after the data I/O has finished.
-	 */
-	if (ioend->io_type == IO_UNWRITTEN &&
-	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {
-
-		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
-						 ioend->io_size);
-		if (error)
-			ioend->io_error = error;
-	}
-
-	/*
-	 * We might have to update the on-disk file size after extending
-	 * writes.
-	 */
-	error = xfs_setfilesize(ioend);
-	ASSERT(!error || error == EAGAIN);
-
-	/*
-	 * If we didn't complete processing of the ioend, requeue it to the
-	 * tail of the workqueue for another attempt later. Otherwise destroy
-	 * it.
-	 */
-	if (error == EAGAIN) {
-		atomic_inc(&ioend->io_remaining);
-		xfs_finish_ioend(ioend);
-		/* ensure we don't spin on blocked ioends */
-		delay(1);
-	} else {
-		if (ioend->io_iocb)
-			aio_complete(ioend->io_iocb, ioend->io_result, 0);
-		xfs_destroy_ioend(ioend);
-	}
-}
-
-/*
- * Call IO completion handling in caller context on the final put of an ioend.
- */
-STATIC void
-xfs_finish_ioend_sync(
-	struct xfs_ioend	*ioend)
-{
-	if (atomic_dec_and_test(&ioend->io_remaining))
-		xfs_end_io(&ioend->io_work);
-}
-
-/*
- * Allocate and initialise an IO completion structure.
- * We need to track unwritten extent write completion here initially.
- * We'll need to extend this for updating the ondisk inode size later
- * (vs. incore size).
- */
-STATIC xfs_ioend_t *
-xfs_alloc_ioend(
-	struct inode		*inode,
-	unsigned int		type)
-{
-	xfs_ioend_t		*ioend;
-
-	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
-
-	/*
-	 * Set the count to 1 initially, which will prevent an I/O
-	 * completion callback from happening before we have started
-	 * all the I/O from calling the completion routine too early.
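xfs_setfilesize() deliberately uses a trylock and reports EAGAIN so that I/O completion never blocks on the inode lock, and xfs_end_io() reacts by requeueing the ioend. A userspace model of that contract using pthreads; the kernel uses its own locking and workqueues, so this is only an analogy:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of the contract: never block on the lock in completion context;
 * report EAGAIN and let the caller requeue the work instead. */
static int try_update_size(long long *di_size, long long new_size)
{
	if (pthread_mutex_trylock(&ilock) != 0)
		return EAGAIN;		/* caller requeues, does not sleep */
	if (new_size > *di_size)
		*di_size = new_size;
	pthread_mutex_unlock(&ilock);
	return 0;
}

int main(void)
{
	long long di_size = 4096;

	/* xfs_end_io() re-queues to the workqueue here rather than spinning. */
	while (try_update_size(&di_size, 8192) == EAGAIN)
		;
	printf("on-disk size now %lld\n", di_size);
	return 0;
}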
- */ - atomic_set(&ioend->io_remaining, 1); - ioend->io_error = 0; - ioend->io_list = NULL; - ioend->io_type = type; - ioend->io_inode = inode; - ioend->io_buffer_head = NULL; - ioend->io_buffer_tail = NULL; - atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); - ioend->io_offset = 0; - ioend->io_size = 0; - ioend->io_iocb = NULL; - ioend->io_result = 0; - - INIT_WORK(&ioend->io_work, xfs_end_io); - return ioend; -} - -STATIC int -xfs_map_blocks( - struct inode *inode, - loff_t offset, - struct xfs_bmbt_irec *imap, - int type, - int nonblocking) -{ - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - ssize_t count = 1 << inode->i_blkbits; - xfs_fileoff_t offset_fsb, end_fsb; - int error = 0; - int bmapi_flags = XFS_BMAPI_ENTIRE; - int nimaps = 1; - - if (XFS_FORCED_SHUTDOWN(mp)) - return -XFS_ERROR(EIO); - - if (type == IO_UNWRITTEN) - bmapi_flags |= XFS_BMAPI_IGSTATE; - - if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { - if (nonblocking) - return -XFS_ERROR(EAGAIN); - xfs_ilock(ip, XFS_ILOCK_SHARED); - } - - ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || - (ip->i_df.if_flags & XFS_IFEXTENTS)); - ASSERT(offset <= mp->m_maxioffset); - - if (offset + count > mp->m_maxioffset) - count = mp->m_maxioffset - offset; - end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); - offset_fsb = XFS_B_TO_FSBT(mp, offset); - error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, - bmapi_flags, NULL, 0, imap, &nimaps, NULL); - xfs_iunlock(ip, XFS_ILOCK_SHARED); - - if (error) - return -XFS_ERROR(error); - - if (type == IO_DELALLOC && - (!nimaps || isnullstartblock(imap->br_startblock))) { - error = xfs_iomap_write_allocate(ip, offset, count, imap); - if (!error) - trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); - return -XFS_ERROR(error); - } - -#ifdef DEBUG - if (type == IO_UNWRITTEN) { - ASSERT(nimaps); - ASSERT(imap->br_startblock != HOLESTARTBLOCK); - ASSERT(imap->br_startblock != DELAYSTARTBLOCK); - } -#endif - if (nimaps) - trace_xfs_map_blocks_found(ip, offset, count, type, imap); - return 0; -} - -STATIC int -xfs_imap_valid( - struct inode *inode, - struct xfs_bmbt_irec *imap, - xfs_off_t offset) -{ - offset >>= inode->i_blkbits; - - return offset >= imap->br_startoff && - offset < imap->br_startoff + imap->br_blockcount; -} - -/* - * BIO completion handler for buffered IO. - */ -STATIC void -xfs_end_bio( - struct bio *bio, - int error) -{ - xfs_ioend_t *ioend = bio->bi_private; - - ASSERT(atomic_read(&bio->bi_cnt) >= 1); - ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; - - /* Toss bio and pass work off to an xfsdatad thread */ - bio->bi_private = NULL; - bio->bi_end_io = NULL; - bio_put(bio); - - xfs_finish_ioend(ioend); -} - -STATIC void -xfs_submit_ioend_bio( - struct writeback_control *wbc, - xfs_ioend_t *ioend, - struct bio *bio) -{ - atomic_inc(&ioend->io_remaining); - bio->bi_private = ioend; - bio->bi_end_io = xfs_end_bio; - - /* - * If the I/O is beyond EOF we mark the inode dirty immediately - * but don't update the inode size until I/O completion. - */ - if (xfs_ioend_new_eof(ioend)) - xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); - - submit_bio(wbc->sync_mode == WB_SYNC_ALL ? 
WRITE_SYNC : WRITE, bio); -} - -STATIC struct bio * -xfs_alloc_ioend_bio( - struct buffer_head *bh) -{ - int nvecs = bio_get_nr_vecs(bh->b_bdev); - struct bio *bio = bio_alloc(GFP_NOIO, nvecs); - - ASSERT(bio->bi_private == NULL); - bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); - bio->bi_bdev = bh->b_bdev; - return bio; -} - -STATIC void -xfs_start_buffer_writeback( - struct buffer_head *bh) -{ - ASSERT(buffer_mapped(bh)); - ASSERT(buffer_locked(bh)); - ASSERT(!buffer_delay(bh)); - ASSERT(!buffer_unwritten(bh)); - - mark_buffer_async_write(bh); - set_buffer_uptodate(bh); - clear_buffer_dirty(bh); -} - -STATIC void -xfs_start_page_writeback( - struct page *page, - int clear_dirty, - int buffers) -{ - ASSERT(PageLocked(page)); - ASSERT(!PageWriteback(page)); - if (clear_dirty) - clear_page_dirty_for_io(page); - set_page_writeback(page); - unlock_page(page); - /* If no buffers on the page are to be written, finish it here */ - if (!buffers) - end_page_writeback(page); -} - -static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh) -{ - return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); -} - -/* - * Submit all of the bios for all of the ioends we have saved up, covering the - * initial writepage page and also any probed pages. - * - * Because we may have multiple ioends spanning a page, we need to start - * writeback on all the buffers before we submit them for I/O. If we mark the - * buffers as we got, then we can end up with a page that only has buffers - * marked async write and I/O complete on can occur before we mark the other - * buffers async write. - * - * The end result of this is that we trip a bug in end_page_writeback() because - * we call it twice for the one page as the code in end_buffer_async_write() - * assumes that all buffers on the page are started at the same time. - * - * The fix is two passes across the ioend list - one to start writeback on the - * buffer_heads, and then submit them for I/O on the second pass. - */ -STATIC void -xfs_submit_ioend( - struct writeback_control *wbc, - xfs_ioend_t *ioend) -{ - xfs_ioend_t *head = ioend; - xfs_ioend_t *next; - struct buffer_head *bh; - struct bio *bio; - sector_t lastblock = 0; - - /* Pass 1 - start writeback */ - do { - next = ioend->io_list; - for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) - xfs_start_buffer_writeback(bh); - } while ((ioend = next) != NULL); - - /* Pass 2 - submit I/O */ - ioend = head; - do { - next = ioend->io_list; - bio = NULL; - - for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { - - if (!bio) { - retry: - bio = xfs_alloc_ioend_bio(bh); - } else if (bh->b_blocknr != lastblock + 1) { - xfs_submit_ioend_bio(wbc, ioend, bio); - goto retry; - } - - if (bio_add_buffer(bio, bh) != bh->b_size) { - xfs_submit_ioend_bio(wbc, ioend, bio); - goto retry; - } - - lastblock = bh->b_blocknr; - } - if (bio) - xfs_submit_ioend_bio(wbc, ioend, bio); - xfs_finish_ioend(ioend); - } while ((ioend = next) != NULL); -} - -/* - * Cancel submission of all buffer_heads so far in this endio. - * Toss the endio too. Only ever called for the initial page - * in a writepage request, so only ever one page. 
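A stripped-down model of the bio-building loop in xfs_submit_ioend() above: consecutive disk blocks are batched into one bio, and any discontiguity submits the current bio and starts a new one (block numbers invented):

#include <stdio.h>

int main(void)
{
	long blocks[] = { 100, 101, 102, 200, 201, 300 };
	int n = sizeof(blocks) / sizeof(blocks[0]);
	long start = blocks[0], last = blocks[0];

	for (int i = 1; i < n; i++) {
		if (blocks[i] != last + 1) {	/* discontiguous: submit */
			printf("submit bio: blocks %ld-%ld\n", start, last);
			start = blocks[i];
		}
		last = blocks[i];
	}
	printf("submit bio: blocks %ld-%ld\n", start, last);
	return 0;
}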
- */ -STATIC void -xfs_cancel_ioend( - xfs_ioend_t *ioend) -{ - xfs_ioend_t *next; - struct buffer_head *bh, *next_bh; - - do { - next = ioend->io_list; - bh = ioend->io_buffer_head; - do { - next_bh = bh->b_private; - clear_buffer_async_write(bh); - unlock_buffer(bh); - } while ((bh = next_bh) != NULL); - - xfs_ioend_wake(XFS_I(ioend->io_inode)); - mempool_free(ioend, xfs_ioend_pool); - } while ((ioend = next) != NULL); -} - -/* - * Test to see if we've been building up a completion structure for - * earlier buffers -- if so, we try to append to this ioend if we - * can, otherwise we finish off any current ioend and start another. - * Return true if we've finished the given ioend. - */ -STATIC void -xfs_add_to_ioend( - struct inode *inode, - struct buffer_head *bh, - xfs_off_t offset, - unsigned int type, - xfs_ioend_t **result, - int need_ioend) -{ - xfs_ioend_t *ioend = *result; - - if (!ioend || need_ioend || type != ioend->io_type) { - xfs_ioend_t *previous = *result; - - ioend = xfs_alloc_ioend(inode, type); - ioend->io_offset = offset; - ioend->io_buffer_head = bh; - ioend->io_buffer_tail = bh; - if (previous) - previous->io_list = ioend; - *result = ioend; - } else { - ioend->io_buffer_tail->b_private = bh; - ioend->io_buffer_tail = bh; - } - - bh->b_private = NULL; - ioend->io_size += bh->b_size; -} - -STATIC void -xfs_map_buffer( - struct inode *inode, - struct buffer_head *bh, - struct xfs_bmbt_irec *imap, - xfs_off_t offset) -{ - sector_t bn; - struct xfs_mount *m = XFS_I(inode)->i_mount; - xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff); - xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock); - - ASSERT(imap->br_startblock != HOLESTARTBLOCK); - ASSERT(imap->br_startblock != DELAYSTARTBLOCK); - - bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) + - ((offset - iomap_offset) >> inode->i_blkbits); - - ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode))); - - bh->b_blocknr = bn; - set_buffer_mapped(bh); -} - -STATIC void -xfs_map_at_offset( - struct inode *inode, - struct buffer_head *bh, - struct xfs_bmbt_irec *imap, - xfs_off_t offset) -{ - ASSERT(imap->br_startblock != HOLESTARTBLOCK); - ASSERT(imap->br_startblock != DELAYSTARTBLOCK); - - xfs_map_buffer(inode, bh, imap, offset); - set_buffer_mapped(bh); - clear_buffer_delay(bh); - clear_buffer_unwritten(bh); -} - -/* - * Test if a given page is suitable for writing as part of an unwritten - * or delayed allocate extent. - */ -STATIC int -xfs_is_delayed_page( - struct page *page, - unsigned int type) -{ - if (PageWriteback(page)) - return 0; - - if (page->mapping && page_has_buffers(page)) { - struct buffer_head *bh, *head; - int acceptable = 0; - - bh = head = page_buffers(page); - do { - if (buffer_unwritten(bh)) - acceptable = (type == IO_UNWRITTEN); - else if (buffer_delay(bh)) - acceptable = (type == IO_DELALLOC); - else if (buffer_dirty(bh) && buffer_mapped(bh)) - acceptable = (type == IO_OVERWRITE); - else - break; - } while ((bh = bh->b_this_page) != head); - - if (acceptable) - return 1; - } - - return 0; -} - -/* - * Allocate & map buffers for page given the extent map. Write it out. - * except for the original page of a writepage, this is called on - * delalloc/unwritten pages only, for the original page it is possible - * that the page has no mapping at all. 
- */ -STATIC int -xfs_convert_page( - struct inode *inode, - struct page *page, - loff_t tindex, - struct xfs_bmbt_irec *imap, - xfs_ioend_t **ioendp, - struct writeback_control *wbc) -{ - struct buffer_head *bh, *head; - xfs_off_t end_offset; - unsigned long p_offset; - unsigned int type; - int len, page_dirty; - int count = 0, done = 0, uptodate = 1; - xfs_off_t offset = page_offset(page); - - if (page->index != tindex) - goto fail; - if (!trylock_page(page)) - goto fail; - if (PageWriteback(page)) - goto fail_unlock_page; - if (page->mapping != inode->i_mapping) - goto fail_unlock_page; - if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) - goto fail_unlock_page; - - /* - * page_dirty is initially a count of buffers on the page before - * EOF and is decremented as we move each into a cleanable state. - * - * Derivation: - * - * End offset is the highest offset that this page should represent. - * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) - * will evaluate non-zero and be less than PAGE_CACHE_SIZE and - * hence give us the correct page_dirty count. On any other page, - * it will be zero and in that case we need page_dirty to be the - * count of buffers on the page. - */ - end_offset = min_t(unsigned long long, - (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, - i_size_read(inode)); - - len = 1 << inode->i_blkbits; - p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), - PAGE_CACHE_SIZE); - p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; - page_dirty = p_offset / len; - - bh = head = page_buffers(page); - do { - if (offset >= end_offset) - break; - if (!buffer_uptodate(bh)) - uptodate = 0; - if (!(PageUptodate(page) || buffer_uptodate(bh))) { - done = 1; - continue; - } - - if (buffer_unwritten(bh) || buffer_delay(bh) || - buffer_mapped(bh)) { - if (buffer_unwritten(bh)) - type = IO_UNWRITTEN; - else if (buffer_delay(bh)) - type = IO_DELALLOC; - else - type = IO_OVERWRITE; - - if (!xfs_imap_valid(inode, imap, offset)) { - done = 1; - continue; - } - - lock_buffer(bh); - if (type != IO_OVERWRITE) - xfs_map_at_offset(inode, bh, imap, offset); - xfs_add_to_ioend(inode, bh, offset, type, - ioendp, done); - - page_dirty--; - count++; - } else { - done = 1; - } - } while (offset += len, (bh = bh->b_this_page) != head); - - if (uptodate && bh == head) - SetPageUptodate(page); - - if (count) { - if (--wbc->nr_to_write <= 0 && - wbc->sync_mode == WB_SYNC_NONE) - done = 1; - } - xfs_start_page_writeback(page, !page_dirty, count); - - return done; - fail_unlock_page: - unlock_page(page); - fail: - return 1; -} - -/* - * Convert & write out a cluster of pages in the same extent as defined - * by mp and following the start page. 
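The page_dirty derivation described in xfs_convert_page() above is easiest to see with numbers. A worked instance for a 4k page of 512-byte buffers with EOF 1300 bytes into the page (all values invented):

#include <stdio.h>

#define PAGE_SIZE_L 4096UL

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

static unsigned long roundup_to(unsigned long x, unsigned long step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	/* EOF falls inside page index 5 -- the "last page" case. */
	unsigned long long i_size = 5 * PAGE_SIZE_L + 1300;
	unsigned long index = 5, len = 512;

	unsigned long long end_offset =
		min_u64((unsigned long long)(index + 1) * PAGE_SIZE_L, i_size);
	unsigned long p_offset = end_offset & (PAGE_SIZE_L - 1);	/* 1300 */

	/* Zero means EOF is at or past the page end: all buffers count. */
	p_offset = p_offset ? roundup_to(p_offset, len) : PAGE_SIZE_L;
	printf("page_dirty = %lu buffers\n", p_offset / len);	/* 1536/512 = 3 */
	return 0;
}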
- */ -STATIC void -xfs_cluster_write( - struct inode *inode, - pgoff_t tindex, - struct xfs_bmbt_irec *imap, - xfs_ioend_t **ioendp, - struct writeback_control *wbc, - pgoff_t tlast) -{ - struct pagevec pvec; - int done = 0, i; - - pagevec_init(&pvec, 0); - while (!done && tindex <= tlast) { - unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); - - if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) - break; - - for (i = 0; i < pagevec_count(&pvec); i++) { - done = xfs_convert_page(inode, pvec.pages[i], tindex++, - imap, ioendp, wbc); - if (done) - break; - } - - pagevec_release(&pvec); - cond_resched(); - } -} - -STATIC void -xfs_vm_invalidatepage( - struct page *page, - unsigned long offset) -{ - trace_xfs_invalidatepage(page->mapping->host, page, offset); - block_invalidatepage(page, offset); -} - -/* - * If the page has delalloc buffers on it, we need to punch them out before we - * invalidate the page. If we don't, we leave a stale delalloc mapping on the - * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read - * is done on that same region - the delalloc extent is returned when none is - * supposed to be there. - * - * We prevent this by truncating away the delalloc regions on the page before - * invalidating it. Because they are delalloc, we can do this without needing a - * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this - * truncation without a transaction as there is no space left for block - * reservation (typically why we see a ENOSPC in writeback). - * - * This is not a performance critical path, so for now just do the punching a - * buffer head at a time. - */ -STATIC void -xfs_aops_discard_page( - struct page *page) -{ - struct inode *inode = page->mapping->host; - struct xfs_inode *ip = XFS_I(inode); - struct buffer_head *bh, *head; - loff_t offset = page_offset(page); - - if (!xfs_is_delayed_page(page, IO_DELALLOC)) - goto out_invalidate; - - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - goto out_invalidate; - - xfs_alert(ip->i_mount, - "page discard on page %p, inode 0x%llx, offset %llu.", - page, ip->i_ino, offset); - - xfs_ilock(ip, XFS_ILOCK_EXCL); - bh = head = page_buffers(page); - do { - int error; - xfs_fileoff_t start_fsb; - - if (!buffer_delay(bh)) - goto next_buffer; - - start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); - error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1); - if (error) { - /* something screwed, just bail */ - if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { - xfs_alert(ip->i_mount, - "page discard unable to remove delalloc mapping."); - } - break; - } -next_buffer: - offset += 1 << inode->i_blkbits; - - } while ((bh = bh->b_this_page) != head); - - xfs_iunlock(ip, XFS_ILOCK_EXCL); -out_invalidate: - xfs_vm_invalidatepage(page, 0); - return; -} - -/* - * Write out a dirty page. - * - * For delalloc space on the page we need to allocate space and flush it. - * For unwritten space on the page we need to start the conversion to - * regular allocated space. - * For any other dirty buffer heads on the page we should flush them. 
- */ -STATIC int -xfs_vm_writepage( - struct page *page, - struct writeback_control *wbc) -{ - struct inode *inode = page->mapping->host; - struct buffer_head *bh, *head; - struct xfs_bmbt_irec imap; - xfs_ioend_t *ioend = NULL, *iohead = NULL; - loff_t offset; - unsigned int type; - __uint64_t end_offset; - pgoff_t end_index, last_index; - ssize_t len; - int err, imap_valid = 0, uptodate = 1; - int count = 0; - int nonblocking = 0; - - trace_xfs_writepage(inode, page, 0); - - ASSERT(page_has_buffers(page)); - - /* - * Refuse to write the page out if we are called from reclaim context. - * - * This avoids stack overflows when called from deeply used stacks in - * random callers for direct reclaim or memcg reclaim. We explicitly - * allow reclaim from kswapd as the stack usage there is relatively low. - * - * This should really be done by the core VM, but until that happens - * filesystems like XFS, btrfs and ext4 have to take care of this - * by themselves. - */ - if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) - goto redirty; - - /* - * Given that we do not allow direct reclaim to call us, we should - * never be called while in a filesystem transaction. - */ - if (WARN_ON(current->flags & PF_FSTRANS)) - goto redirty; - - /* Is this page beyond the end of the file? */ - offset = i_size_read(inode); - end_index = offset >> PAGE_CACHE_SHIFT; - last_index = (offset - 1) >> PAGE_CACHE_SHIFT; - if (page->index >= end_index) { - if ((page->index >= end_index + 1) || - !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { - unlock_page(page); - return 0; - } - } - - end_offset = min_t(unsigned long long, - (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, - offset); - len = 1 << inode->i_blkbits; - - bh = head = page_buffers(page); - offset = page_offset(page); - type = IO_OVERWRITE; - - if (wbc->sync_mode == WB_SYNC_NONE) - nonblocking = 1; - - do { - int new_ioend = 0; - - if (offset >= end_offset) - break; - if (!buffer_uptodate(bh)) - uptodate = 0; - - /* - * set_page_dirty dirties all buffers in a page, independent - * of their state. The dirty state however is entirely - * meaningless for holes (!mapped && uptodate), so skip - * buffers covering holes here. - */ - if (!buffer_mapped(bh) && buffer_uptodate(bh)) { - imap_valid = 0; - continue; - } - - if (buffer_unwritten(bh)) { - if (type != IO_UNWRITTEN) { - type = IO_UNWRITTEN; - imap_valid = 0; - } - } else if (buffer_delay(bh)) { - if (type != IO_DELALLOC) { - type = IO_DELALLOC; - imap_valid = 0; - } - } else if (buffer_uptodate(bh)) { - if (type != IO_OVERWRITE) { - type = IO_OVERWRITE; - imap_valid = 0; - } - } else { - if (PageUptodate(page)) { - ASSERT(buffer_mapped(bh)); - imap_valid = 0; - } - continue; - } - - if (imap_valid) - imap_valid = xfs_imap_valid(inode, &imap, offset); - if (!imap_valid) { - /* - * If we didn't have a valid mapping then we need to - * put the new mapping into a separate ioend structure. - * This ensures non-contiguous extents always have - * separate ioends, which is particularly important - * for unwritten extent conversion at I/O completion - * time. 
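The cached-mapping test used throughout the loop above converts the byte offset to a filesystem-block offset and checks it against the extent, as in this stand-alone model of xfs_imap_valid() (values invented):

#include <stdio.h>

struct extent { long long startoff, blockcount; };	/* in fs blocks */

static int imap_valid(const struct extent *e, long long byte_off, int blkbits)
{
	long long fsb = byte_off >> blkbits;

	return fsb >= e->startoff && fsb < e->startoff + e->blockcount;
}

int main(void)
{
	struct extent e = { .startoff = 16, .blockcount = 8 };	/* blocks 16-23 */

	printf("64k covered? %d\n", imap_valid(&e, 64 * 1024, 12));	/* block 16 -> 1 */
	printf("96k covered? %d\n", imap_valid(&e, 96 * 1024, 12));	/* block 24 -> 0 */
	return 0;
}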
- */ - new_ioend = 1; - err = xfs_map_blocks(inode, offset, &imap, type, - nonblocking); - if (err) - goto error; - imap_valid = xfs_imap_valid(inode, &imap, offset); - } - if (imap_valid) { - lock_buffer(bh); - if (type != IO_OVERWRITE) - xfs_map_at_offset(inode, bh, &imap, offset); - xfs_add_to_ioend(inode, bh, offset, type, &ioend, - new_ioend); - count++; - } - - if (!iohead) - iohead = ioend; - - } while (offset += len, ((bh = bh->b_this_page) != head)); - - if (uptodate && bh == head) - SetPageUptodate(page); - - xfs_start_page_writeback(page, 1, count); - - if (ioend && imap_valid) { - xfs_off_t end_index; - - end_index = imap.br_startoff + imap.br_blockcount; - - /* to bytes */ - end_index <<= inode->i_blkbits; - - /* to pages */ - end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; - - /* check against file size */ - if (end_index > last_index) - end_index = last_index; - - xfs_cluster_write(inode, page->index + 1, &imap, &ioend, - wbc, end_index); - } - - if (iohead) - xfs_submit_ioend(wbc, iohead); - - return 0; - -error: - if (iohead) - xfs_cancel_ioend(iohead); - - if (err == -EAGAIN) - goto redirty; - - xfs_aops_discard_page(page); - ClearPageUptodate(page); - unlock_page(page); - return err; - -redirty: - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; -} - -STATIC int -xfs_vm_writepages( - struct address_space *mapping, - struct writeback_control *wbc) -{ - xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); - return generic_writepages(mapping, wbc); -} - -/* - * Called to move a page into cleanable state - and from there - * to be released. The page should already be clean. We always - * have buffer heads in this call. - * - * Returns 1 if the page is ok to release, 0 otherwise. - */ -STATIC int -xfs_vm_releasepage( - struct page *page, - gfp_t gfp_mask) -{ - int delalloc, unwritten; - - trace_xfs_releasepage(page->mapping->host, page, 0); - - xfs_count_page_state(page, &delalloc, &unwritten); - - if (WARN_ON(delalloc)) - return 0; - if (WARN_ON(unwritten)) - return 0; - - return try_to_free_buffers(page); -} - -STATIC int -__xfs_get_blocks( - struct inode *inode, - sector_t iblock, - struct buffer_head *bh_result, - int create, - int direct) -{ - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - xfs_fileoff_t offset_fsb, end_fsb; - int error = 0; - int lockmode = 0; - struct xfs_bmbt_irec imap; - int nimaps = 1; - xfs_off_t offset; - ssize_t size; - int new = 0; - - if (XFS_FORCED_SHUTDOWN(mp)) - return -XFS_ERROR(EIO); - - offset = (xfs_off_t)iblock << inode->i_blkbits; - ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); - size = bh_result->b_size; - - if (!create && direct && offset >= i_size_read(inode)) - return 0; - - if (create) { - lockmode = XFS_ILOCK_EXCL; - xfs_ilock(ip, lockmode); - } else { - lockmode = xfs_ilock_map_shared(ip); - } - - ASSERT(offset <= mp->m_maxioffset); - if (offset + size > mp->m_maxioffset) - size = mp->m_maxioffset - offset; - end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); - offset_fsb = XFS_B_TO_FSBT(mp, offset); - - error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, - XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL); - if (error) - goto out_unlock; - - if (create && - (!nimaps || - (imap.br_startblock == HOLESTARTBLOCK || - imap.br_startblock == DELAYSTARTBLOCK))) { - if (direct) { - error = xfs_iomap_write_direct(ip, offset, size, - &imap, nimaps); - } else { - error = xfs_iomap_write_delay(ip, offset, size, &imap); - } - if (error) - goto out_unlock; - - 
trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); - } else if (nimaps) { - trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); - } else { - trace_xfs_get_blocks_notfound(ip, offset, size); - goto out_unlock; - } - xfs_iunlock(ip, lockmode); - - if (imap.br_startblock != HOLESTARTBLOCK && - imap.br_startblock != DELAYSTARTBLOCK) { - /* - * For unwritten extents do not report a disk address on - * the read case (treat as if we're reading into a hole). - */ - if (create || !ISUNWRITTEN(&imap)) - xfs_map_buffer(inode, bh_result, &imap, offset); - if (create && ISUNWRITTEN(&imap)) { - if (direct) - bh_result->b_private = inode; - set_buffer_unwritten(bh_result); - } - } - - /* - * If this is a realtime file, data may be on a different device. - * to that pointed to from the buffer_head b_bdev currently. - */ - bh_result->b_bdev = xfs_find_bdev_for_inode(inode); - - /* - * If we previously allocated a block out beyond eof and we are now - * coming back to use it then we will need to flag it as new even if it - * has a disk address. - * - * With sub-block writes into unwritten extents we also need to mark - * the buffer as new so that the unwritten parts of the buffer gets - * correctly zeroed. - */ - if (create && - ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || - (offset >= i_size_read(inode)) || - (new || ISUNWRITTEN(&imap)))) - set_buffer_new(bh_result); - - if (imap.br_startblock == DELAYSTARTBLOCK) { - BUG_ON(direct); - if (create) { - set_buffer_uptodate(bh_result); - set_buffer_mapped(bh_result); - set_buffer_delay(bh_result); - } - } - - /* - * If this is O_DIRECT or the mpage code calling tell them how large - * the mapping is, so that we can avoid repeated get_blocks calls. - */ - if (direct || size > (1 << inode->i_blkbits)) { - xfs_off_t mapping_size; - - mapping_size = imap.br_startoff + imap.br_blockcount - iblock; - mapping_size <<= inode->i_blkbits; - - ASSERT(mapping_size > 0); - if (mapping_size > size) - mapping_size = size; - if (mapping_size > LONG_MAX) - mapping_size = LONG_MAX; - - bh_result->b_size = mapping_size; - } - - return 0; - -out_unlock: - xfs_iunlock(ip, lockmode); - return -error; -} - -int -xfs_get_blocks( - struct inode *inode, - sector_t iblock, - struct buffer_head *bh_result, - int create) -{ - return __xfs_get_blocks(inode, iblock, bh_result, create, 0); -} - -STATIC int -xfs_get_blocks_direct( - struct inode *inode, - sector_t iblock, - struct buffer_head *bh_result, - int create) -{ - return __xfs_get_blocks(inode, iblock, bh_result, create, 1); -} - -/* - * Complete a direct I/O write request. - * - * If the private argument is non-NULL __xfs_get_blocks signals us that we - * need to issue a transaction to convert the range from unwritten to written - * extents. In case this is regular synchronous I/O we just call xfs_end_io - * to do this and we are done. But in case this was a successful AIO - * request this handler is called from interrupt context, from which we - * can't start transactions. In that case offload the I/O completion to - * the workqueues we also use for buffered I/O completion. - */ -STATIC void -xfs_end_io_direct_write( - struct kiocb *iocb, - loff_t offset, - ssize_t size, - void *private, - int ret, - bool is_async) -{ - struct xfs_ioend *ioend = iocb->private; - - /* - * blockdev_direct_IO can return an error even after the I/O - * completion handler was called. Thus we need to protect - * against double-freeing. 
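The b_size clamping near the end of __xfs_get_blocks() above tells the caller how far the found extent extends past the requested block, so it can avoid repeated get_blocks calls. A worked model with invented numbers:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int blkbits = 12;			/* 4k blocks */
	long long startoff = 100, blockcount = 50, iblock = 120;
	long long size = 16 * 4096;		/* bytes the caller asked for */

	/* Extent covers blocks 100-149, so 30 blocks remain past iblock. */
	long long mapping_size = (startoff + blockcount - iblock) << blkbits;

	if (mapping_size > size)
		mapping_size = size;		/* never report more than asked */
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;	/* b_size is a long */
	printf("b_size = %lld bytes (%lld blocks)\n",
	       mapping_size, mapping_size >> blkbits);
	return 0;
}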
- */
-	iocb->private = NULL;
-
-	ioend->io_offset = offset;
-	ioend->io_size = size;
-	if (private && size > 0)
-		ioend->io_type = IO_UNWRITTEN;
-
-	if (is_async) {
-		/*
-		 * If we are converting an unwritten extent we need to delay
-		 * the AIO completion until after the unwritten extent
-		 * conversion has completed, otherwise do it ASAP.
-		 */
-		if (ioend->io_type == IO_UNWRITTEN) {
-			ioend->io_iocb = iocb;
-			ioend->io_result = ret;
-		} else {
-			aio_complete(iocb, ret, 0);
-		}
-		xfs_finish_ioend(ioend);
-	} else {
-		xfs_finish_ioend_sync(ioend);
-	}
-
-	/* XXX: probably should move into the real I/O completion handler */
-	inode_dio_done(ioend->io_inode);
-}
-
-STATIC ssize_t
-xfs_vm_direct_IO(
-	int			rw,
-	struct kiocb		*iocb,
-	const struct iovec	*iov,
-	loff_t			offset,
-	unsigned long		nr_segs)
-{
-	struct inode		*inode = iocb->ki_filp->f_mapping->host;
-	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
-	ssize_t			ret;
-
-	if (rw & WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
-
-		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-					    offset, nr_segs,
-					    xfs_get_blocks_direct,
-					    xfs_end_io_direct_write, NULL, 0);
-		if (ret != -EIOCBQUEUED && iocb->private)
-			xfs_destroy_ioend(iocb->private);
-	} else {
-		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-					    offset, nr_segs,
-					    xfs_get_blocks_direct,
-					    NULL, NULL, 0);
-	}
-
-	return ret;
-}
-
-STATIC void
-xfs_vm_write_failed(
-	struct address_space	*mapping,
-	loff_t			to)
-{
-	struct inode		*inode = mapping->host;
-
-	if (to > inode->i_size) {
-		/*
-		 * punch out the delalloc blocks we have already allocated. We
-		 * don't call xfs_setattr() to do this as we may be in the
-		 * middle of a multi-iovec write and so the vfs inode->i_size
-		 * will not match the xfs ip->i_size and so it will zero too
-		 * much. Hence we just truncate the page cache to zero what is
-		 * necessary and punch the delalloc blocks directly.
-		 */
-		struct xfs_inode	*ip = XFS_I(inode);
-		xfs_fileoff_t		start_fsb;
-		xfs_fileoff_t		end_fsb;
-		int			error;
-
-		truncate_pagecache(inode, to, inode->i_size);
-
-		/*
-		 * Check if there are any blocks that are outside of i_size
-		 * that need to be trimmed back.
- */ - start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1; - end_fsb = XFS_B_TO_FSB(ip->i_mount, to); - if (end_fsb <= start_fsb) - return; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - error = xfs_bmap_punch_delalloc_range(ip, start_fsb, - end_fsb - start_fsb); - if (error) { - /* something screwed, just bail */ - if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { - xfs_alert(ip->i_mount, - "xfs_vm_write_failed: unable to clean up ino %lld", - ip->i_ino); - } - } - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } -} - -STATIC int -xfs_vm_write_begin( - struct file *file, - struct address_space *mapping, - loff_t pos, - unsigned len, - unsigned flags, - struct page **pagep, - void **fsdata) -{ - int ret; - - ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, - pagep, xfs_get_blocks); - if (unlikely(ret)) - xfs_vm_write_failed(mapping, pos + len); - return ret; -} - -STATIC int -xfs_vm_write_end( - struct file *file, - struct address_space *mapping, - loff_t pos, - unsigned len, - unsigned copied, - struct page *page, - void *fsdata) -{ - int ret; - - ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); - if (unlikely(ret < len)) - xfs_vm_write_failed(mapping, pos + len); - return ret; -} - -STATIC sector_t -xfs_vm_bmap( - struct address_space *mapping, - sector_t block) -{ - struct inode *inode = (struct inode *)mapping->host; - struct xfs_inode *ip = XFS_I(inode); - - trace_xfs_vm_bmap(XFS_I(inode)); - xfs_ilock(ip, XFS_IOLOCK_SHARED); - xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); - xfs_iunlock(ip, XFS_IOLOCK_SHARED); - return generic_block_bmap(mapping, block, xfs_get_blocks); -} - -STATIC int -xfs_vm_readpage( - struct file *unused, - struct page *page) -{ - return mpage_readpage(page, xfs_get_blocks); -} - -STATIC int -xfs_vm_readpages( - struct file *unused, - struct address_space *mapping, - struct list_head *pages, - unsigned nr_pages) -{ - return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); -} - -const struct address_space_operations xfs_address_space_operations = { - .readpage = xfs_vm_readpage, - .readpages = xfs_vm_readpages, - .writepage = xfs_vm_writepage, - .writepages = xfs_vm_writepages, - .releasepage = xfs_vm_releasepage, - .invalidatepage = xfs_vm_invalidatepage, - .write_begin = xfs_vm_write_begin, - .write_end = xfs_vm_write_end, - .bmap = xfs_vm_bmap, - .direct_IO = xfs_vm_direct_IO, - .migratepage = buffer_migrate_page, - .is_partially_uptodate = block_is_partially_uptodate, - .error_remove_page = generic_error_remove_page, -}; diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h deleted file mode 100644 index 71f721e..0000000 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright (c) 2005-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_AOPS_H__ -#define __XFS_AOPS_H__ - -extern struct workqueue_struct *xfsdatad_workqueue; -extern struct workqueue_struct *xfsconvertd_workqueue; -extern mempool_t *xfs_ioend_pool; - -/* - * Types of I/O for bmap clustering and I/O completion tracking. - */ -enum { - IO_DIRECT = 0, /* special case for direct I/O ioends */ - IO_DELALLOC, /* mapping covers delalloc region */ - IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ - IO_OVERWRITE, /* mapping covers already allocated extent */ -}; - -#define XFS_IO_TYPES \ - { 0, "" }, \ - { IO_DELALLOC, "delalloc" }, \ - { IO_UNWRITTEN, "unwritten" }, \ - { IO_OVERWRITE, "overwrite" } - -/* - * xfs_ioend struct manages large extent writes for XFS. - * It can manage several multi-page bio's at once. - */ -typedef struct xfs_ioend { - struct xfs_ioend *io_list; /* next ioend in chain */ - unsigned int io_type; /* delalloc / unwritten */ - int io_error; /* I/O error code */ - atomic_t io_remaining; /* hold count */ - struct inode *io_inode; /* file being written to */ - struct buffer_head *io_buffer_head;/* buffer linked list head */ - struct buffer_head *io_buffer_tail;/* buffer linked list tail */ - size_t io_size; /* size of the extent */ - xfs_off_t io_offset; /* offset in the file */ - struct work_struct io_work; /* xfsdatad work queue */ - struct kiocb *io_iocb; - int io_result; -} xfs_ioend_t; - -extern const struct address_space_operations xfs_address_space_operations; -extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int); - -extern void xfs_ioend_init(void); -extern void xfs_ioend_wait(struct xfs_inode *); - -extern void xfs_count_page_state(struct page *, int *, int *); - -#endif /* __XFS_AOPS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c deleted file mode 100644 index c57836d..0000000 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ /dev/null @@ -1,1876 +0,0 @@ -/* - * Copyright (c) 2000-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#include "xfs.h"
-#include <linux/stddef.h>
-#include <linux/errno.h>
-#include <linux/gfp.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/bio.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/workqueue.h>
-#include <linux/percpu.h>
-#include <linux/blkdev.h>
-#include <linux/hash.h>
-#include <linux/kthread.h>
-#include <linux/migrate.h>
-#include <linux/backing-dev.h>
-#include <linux/freezer.h>
-
-#include "xfs_sb.h"
-#include "xfs_inum.h"
-#include "xfs_log.h"
-#include "xfs_ag.h"
-#include "xfs_mount.h"
-#include "xfs_trace.h"
-
-static kmem_zone_t *xfs_buf_zone;
-STATIC int xfsbufd(void *);
-STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-
-static struct workqueue_struct *xfslogd_workqueue;
-struct workqueue_struct *xfsdatad_workqueue;
-struct workqueue_struct *xfsconvertd_workqueue;
-
-#ifdef XFS_BUF_LOCK_TRACKING
-# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
-# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
-# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
-#else
-# define XB_SET_OWNER(bp)	do { } while (0)
-# define XB_CLEAR_OWNER(bp)	do { } while (0)
-# define XB_GET_OWNER(bp)	do { } while (0)
-#endif
-
-#define xb_to_gfp(flags) \
-	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
-	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
-
-#define xb_to_km(flags) \
-	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
-
-#define xfs_buf_allocate(flags) \
-	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
-#define xfs_buf_deallocate(bp) \
-	kmem_zone_free(xfs_buf_zone, (bp));
-
-static inline int
-xfs_buf_is_vmapped(
-	struct xfs_buf	*bp)
-{
-	/*
-	 * Return true if the buffer is vmapped.
-	 *
-	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
-	 * code is clever enough to know it doesn't have to map a single page,
-	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
-	 */
-	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
-}
-
-static inline int
-xfs_buf_vmap_len(
-	struct xfs_buf	*bp)
-{
-	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
-}
-
-/*
- * xfs_buf_lru_add - add a buffer to the LRU.
- *
- * The LRU takes a new reference to the buffer so that it will only be freed
- * once the shrinker takes the buffer off the LRU.
- */
-STATIC void
-xfs_buf_lru_add(
-	struct xfs_buf	*bp)
-{
-	struct xfs_buftarg *btp = bp->b_target;
-
-	spin_lock(&btp->bt_lru_lock);
-	if (list_empty(&bp->b_lru)) {
-		atomic_inc(&bp->b_hold);
-		list_add_tail(&bp->b_lru, &btp->bt_lru);
-		btp->bt_lru_nr++;
-	}
-	spin_unlock(&btp->bt_lru_lock);
-}
-
-/*
- * xfs_buf_lru_del - remove a buffer from the LRU
- *
- * The unlocked check is safe here because it only occurs when there are not
- * b_lru_ref counts left on the inode under the pag->pag_buf_lock. It is there
- * to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
- * bt_lru_lock.
- */
-STATIC void
-xfs_buf_lru_del(
-	struct xfs_buf	*bp)
-{
-	struct xfs_buftarg *btp = bp->b_target;
-
-	if (list_empty(&bp->b_lru))
-		return;
-
-	spin_lock(&btp->bt_lru_lock);
-	if (!list_empty(&bp->b_lru)) {
-		list_del_init(&bp->b_lru);
-		btp->bt_lru_nr--;
-	}
-	spin_unlock(&btp->bt_lru_lock);
-}
-
-/*
- * When we mark a buffer stale, we remove the buffer from the LRU and clear the
- * b_lru_ref count so that the buffer is freed immediately when the buffer
- * reference count falls to zero. If the buffer is already on the LRU, we need
- * to remove the reference that LRU holds on the buffer.
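The LRU rules above -- the list owns one hold reference, and membership is tested by whether the buffer's own list node is empty -- can be modelled in a few lines of userspace C. The list primitives are reimplemented here, not the kernel's list.h:

#include <stdio.h>

struct node { struct node *prev, *next; };
struct buf { struct node lru; int hold; };

static void list_init(struct node *n) { n->prev = n->next = n; }
static int  list_empty(struct node *n) { return n->next == n; }
static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}
static void list_del_init(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}

static struct node lru_head;

static void lru_add(struct buf *bp)
{
	if (list_empty(&bp->lru)) {	/* not on the LRU yet */
		bp->hold++;		/* the LRU's own reference */
		list_add_tail(&bp->lru, &lru_head);
	}
}

/* The hold the LRU took is dropped by the stale/shrinker path in the
 * real code, so plain removal leaves the count alone. */
static void lru_del(struct buf *bp)
{
	if (!list_empty(&bp->lru))
		list_del_init(&bp->lru);
}

int main(void)
{
	struct buf b = { .hold = 1 };

	list_init(&lru_head);
	list_init(&b.lru);
	lru_add(&b);
	lru_add(&b);	/* second add is a no-op: already on the list */
	printf("hold=%d on_lru=%d\n", b.hold, !list_empty(&b.lru));
	lru_del(&b);
	printf("hold=%d on_lru=%d\n", b.hold, !list_empty(&b.lru));
	return 0;
}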
- *
- * This prevents build-up of stale buffers on the LRU.
- */
-void
-xfs_buf_stale(
-	struct xfs_buf	*bp)
-{
-	bp->b_flags |= XBF_STALE;
-	atomic_set(&(bp)->b_lru_ref, 0);
-	if (!list_empty(&bp->b_lru)) {
-		struct xfs_buftarg *btp = bp->b_target;
-
-		spin_lock(&btp->bt_lru_lock);
-		if (!list_empty(&bp->b_lru)) {
-			list_del_init(&bp->b_lru);
-			btp->bt_lru_nr--;
-			atomic_dec(&bp->b_hold);
-		}
-		spin_unlock(&btp->bt_lru_lock);
-	}
-	ASSERT(atomic_read(&bp->b_hold) >= 1);
-}
-
-STATIC void
-_xfs_buf_initialize(
-	xfs_buf_t		*bp,
-	xfs_buftarg_t		*target,
-	xfs_off_t		range_base,
-	size_t			range_length,
-	xfs_buf_flags_t		flags)
-{
-	/*
-	 * We don't want certain flags to appear in b_flags.
-	 */
-	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
-
-	memset(bp, 0, sizeof(xfs_buf_t));
-	atomic_set(&bp->b_hold, 1);
-	atomic_set(&bp->b_lru_ref, 1);
-	init_completion(&bp->b_iowait);
-	INIT_LIST_HEAD(&bp->b_lru);
-	INIT_LIST_HEAD(&bp->b_list);
-	RB_CLEAR_NODE(&bp->b_rbnode);
-	sema_init(&bp->b_sema, 0); /* held, no waiters */
-	XB_SET_OWNER(bp);
-	bp->b_target = target;
-	bp->b_file_offset = range_base;
-	/*
-	 * Set buffer_length and count_desired to the same value initially.
-	 * I/O routines should use count_desired, which will be the same in
-	 * most cases but may be reset (e.g. XFS recovery).
-	 */
-	bp->b_buffer_length = bp->b_count_desired = range_length;
-	bp->b_flags = flags;
-	bp->b_bn = XFS_BUF_DADDR_NULL;
-	atomic_set(&bp->b_pin_count, 0);
-	init_waitqueue_head(&bp->b_waiters);
-
-	XFS_STATS_INC(xb_create);
-
-	trace_xfs_buf_init(bp, _RET_IP_);
-}
-
-/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
-	xfs_buf_t		*bp,
-	int			page_count,
-	xfs_buf_flags_t		flags)
-{
-	/* Make sure that we have a page list */
-	if (bp->b_pages == NULL) {
-		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
-		bp->b_page_count = page_count;
-		if (page_count <= XB_PAGES) {
-			bp->b_pages = bp->b_page_array;
-		} else {
-			bp->b_pages = kmem_alloc(sizeof(struct page *) *
-					page_count, xb_to_km(flags));
-			if (bp->b_pages == NULL)
-				return -ENOMEM;
-		}
-		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
-	}
-	return 0;
-}
-
-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
-	xfs_buf_t	*bp)
-{
-	if (bp->b_pages != bp->b_page_array) {
-		kmem_free(bp->b_pages);
-		bp->b_pages = NULL;
-	}
-}
-
-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
-void
-xfs_buf_free(
-	xfs_buf_t		*bp)
-{
-	trace_xfs_buf_free(bp, _RET_IP_);
-
-	ASSERT(list_empty(&bp->b_lru));
-
-	if (bp->b_flags & _XBF_PAGES) {
-		uint		i;
-
-		if (xfs_buf_is_vmapped(bp))
-			vm_unmap_ram(bp->b_addr - bp->b_offset,
-					bp->b_page_count);
-
-		for (i = 0; i < bp->b_page_count; i++) {
-			struct page	*page = bp->b_pages[i];
-
-			__free_page(page);
-		}
-	} else if (bp->b_flags & _XBF_KMEM)
-		kmem_free(bp->b_addr);
-	_xfs_buf_free_pages(bp);
-	xfs_buf_deallocate(bp);
-}
-
-/*
- * Allocates all the pages for buffer in question and builds its page list.
- */ -STATIC int -xfs_buf_allocate_memory( - xfs_buf_t *bp, - uint flags) -{ - size_t size = bp->b_count_desired; - size_t nbytes, offset; - gfp_t gfp_mask = xb_to_gfp(flags); - unsigned short page_count, i; - xfs_off_t end; - int error; - - /* - * for buffers that are contained within a single page, just allocate - * the memory from the heap - there's no need for the complexity of - * page arrays to keep allocation down to order 0. - */ - if (bp->b_buffer_length < PAGE_SIZE) { - bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags)); - if (!bp->b_addr) { - /* low memory - use alloc_page loop instead */ - goto use_alloc_page; - } - - if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) & - PAGE_MASK) != - ((unsigned long)bp->b_addr & PAGE_MASK)) { - /* b_addr spans two pages - use alloc_page instead */ - kmem_free(bp->b_addr); - bp->b_addr = NULL; - goto use_alloc_page; - } - bp->b_offset = offset_in_page(bp->b_addr); - bp->b_pages = bp->b_page_array; - bp->b_pages[0] = virt_to_page(bp->b_addr); - bp->b_page_count = 1; - bp->b_flags |= XBF_MAPPED | _XBF_KMEM; - return 0; - } - -use_alloc_page: - end = bp->b_file_offset + bp->b_buffer_length; - page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); - error = _xfs_buf_get_pages(bp, page_count, flags); - if (unlikely(error)) - return error; - - offset = bp->b_offset; - bp->b_flags |= _XBF_PAGES; - - for (i = 0; i < bp->b_page_count; i++) { - struct page *page; - uint retries = 0; -retry: - page = alloc_page(gfp_mask); - if (unlikely(page == NULL)) { - if (flags & XBF_READ_AHEAD) { - bp->b_page_count = i; - error = ENOMEM; - goto out_free_pages; - } - - /* - * This could deadlock. - * - * But until all the XFS lowlevel code is revamped to - * handle buffer allocation failures we can't do much. - */ - if (!(++retries % 100)) - xfs_err(NULL, - "possible memory allocation deadlock in %s (mode:0x%x)", - __func__, gfp_mask); - - XFS_STATS_INC(xb_page_retries); - congestion_wait(BLK_RW_ASYNC, HZ/50); - goto retry; - } - - XFS_STATS_INC(xb_page_found); - - nbytes = min_t(size_t, size, PAGE_SIZE - offset); - size -= nbytes; - bp->b_pages[i] = page; - offset = 0; - } - return 0; - -out_free_pages: - for (i = 0; i < bp->b_page_count; i++) - __free_page(bp->b_pages[i]); - return error; -} - -/* - * Map buffer into kernel address-space if necessary. - */ -STATIC int -_xfs_buf_map_pages( - xfs_buf_t *bp, - uint flags) -{ - ASSERT(bp->b_flags & _XBF_PAGES); - if (bp->b_page_count == 1) { - /* A single page buffer is always mappable */ - bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; - bp->b_flags |= XBF_MAPPED; - } else if (flags & XBF_MAPPED) { - int retried = 0; - - do { - bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, - -1, PAGE_KERNEL); - if (bp->b_addr) - break; - vm_unmap_aliases(); - } while (retried++ <= 1); - - if (!bp->b_addr) - return -ENOMEM; - bp->b_addr += bp->b_offset; - bp->b_flags |= XBF_MAPPED; - } - - return 0; -} - -/* - * Finding and Reading Buffers - */ - -/* - * Look up, and creates if absent, a lockable buffer for - * a given range of an inode. The buffer is returned - * locked. If other overlapping buffers exist, they are - * released before the new buffer is created and locked, - * which may imply that this call will block until those buffers - * are unlocked. No I/O is implied by this call. 
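For sub-page buffers, xfs_buf_allocate_memory() above takes a plain heap allocation, but only if that allocation does not straddle a page boundary. The straddle test in isolation (4k pages assumed):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE_L 4096UL
#define PAGE_MASK_L (~(PAGE_SIZE_L - 1))

/* First and last byte must land in the same page, or the cheap kmem path
 * is abandoned in favour of per-page allocation. */
static int spans_two_pages(const void *addr, size_t len)
{
	return (((uintptr_t)addr + len - 1) & PAGE_MASK_L) !=
	       ((uintptr_t)addr & PAGE_MASK_L);
}

int main(void)
{
	char *p = malloc(1024);

	printf("heap buffer %p spans two pages: %d\n",
	       (void *)p, spans_two_pages(p, 1024));
	free(p);
	return 0;
}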
- */ -xfs_buf_t * -_xfs_buf_find( - xfs_buftarg_t *btp, /* block device target */ - xfs_off_t ioff, /* starting offset of range */ - size_t isize, /* length of range */ - xfs_buf_flags_t flags, - xfs_buf_t *new_bp) -{ - xfs_off_t range_base; - size_t range_length; - struct xfs_perag *pag; - struct rb_node **rbp; - struct rb_node *parent; - xfs_buf_t *bp; - - range_base = (ioff << BBSHIFT); - range_length = (isize << BBSHIFT); - - /* Check for IOs smaller than the sector size / not sector aligned */ - ASSERT(!(range_length < (1 << btp->bt_sshift))); - ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); - - /* get tree root */ - pag = xfs_perag_get(btp->bt_mount, - xfs_daddr_to_agno(btp->bt_mount, ioff)); - - /* walk tree */ - spin_lock(&pag->pag_buf_lock); - rbp = &pag->pag_buf_tree.rb_node; - parent = NULL; - bp = NULL; - while (*rbp) { - parent = *rbp; - bp = rb_entry(parent, struct xfs_buf, b_rbnode); - - if (range_base < bp->b_file_offset) - rbp = &(*rbp)->rb_left; - else if (range_base > bp->b_file_offset) - rbp = &(*rbp)->rb_right; - else { - /* - * found a block offset match. If the range doesn't - * match, the only way this is allowed is if the buffer - * in the cache is stale and the transaction that made - * it stale has not yet committed. i.e. we are - * reallocating a busy extent. Skip this buffer and - * continue searching to the right for an exact match. - */ - if (bp->b_buffer_length != range_length) { - ASSERT(bp->b_flags & XBF_STALE); - rbp = &(*rbp)->rb_right; - continue; - } - atomic_inc(&bp->b_hold); - goto found; - } - } - - /* No match found */ - if (new_bp) { - _xfs_buf_initialize(new_bp, btp, range_base, - range_length, flags); - rb_link_node(&new_bp->b_rbnode, parent, rbp); - rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree); - /* the buffer keeps the perag reference until it is freed */ - new_bp->b_pag = pag; - spin_unlock(&pag->pag_buf_lock); - } else { - XFS_STATS_INC(xb_miss_locked); - spin_unlock(&pag->pag_buf_lock); - xfs_perag_put(pag); - } - return new_bp; - -found: - spin_unlock(&pag->pag_buf_lock); - xfs_perag_put(pag); - - if (!xfs_buf_trylock(bp)) { - if (flags & XBF_TRYLOCK) { - xfs_buf_rele(bp); - XFS_STATS_INC(xb_busy_locked); - return NULL; - } - xfs_buf_lock(bp); - XFS_STATS_INC(xb_get_locked_waited); - } - - /* - * if the buffer is stale, clear all the external state associated with - * it. We need to keep flags such as how we allocated the buffer memory - * intact here. - */ - if (bp->b_flags & XBF_STALE) { - ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); - bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES; - } - - trace_xfs_buf_find(bp, flags, _RET_IP_); - XFS_STATS_INC(xb_get_locked); - return bp; -} - -/* - * Assembles a buffer covering the specified range. - * Storage in memory for all portions of the buffer will be allocated, - * although backing storage may not be. 
- */ -xfs_buf_t * -xfs_buf_get( - xfs_buftarg_t *target,/* target for buffer */ - xfs_off_t ioff, /* starting offset of range */ - size_t isize, /* length of range */ - xfs_buf_flags_t flags) -{ - xfs_buf_t *bp, *new_bp; - int error = 0; - - new_bp = xfs_buf_allocate(flags); - if (unlikely(!new_bp)) - return NULL; - - bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); - if (bp == new_bp) { - error = xfs_buf_allocate_memory(bp, flags); - if (error) - goto no_buffer; - } else { - xfs_buf_deallocate(new_bp); - if (unlikely(bp == NULL)) - return NULL; - } - - if (!(bp->b_flags & XBF_MAPPED)) { - error = _xfs_buf_map_pages(bp, flags); - if (unlikely(error)) { - xfs_warn(target->bt_mount, - "%s: failed to map pages\n", __func__); - goto no_buffer; - } - } - - XFS_STATS_INC(xb_get); - - /* - * Always fill in the block number now, the mapped cases can do - * their own overlay of this later. - */ - bp->b_bn = ioff; - bp->b_count_desired = bp->b_buffer_length; - - trace_xfs_buf_get(bp, flags, _RET_IP_); - return bp; - - no_buffer: - if (flags & (XBF_LOCK | XBF_TRYLOCK)) - xfs_buf_unlock(bp); - xfs_buf_rele(bp); - return NULL; -} - -STATIC int -_xfs_buf_read( - xfs_buf_t *bp, - xfs_buf_flags_t flags) -{ - int status; - - ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); - ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); - - bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD); - bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); - - status = xfs_buf_iorequest(bp); - if (status || bp->b_error || (flags & XBF_ASYNC)) - return status; - return xfs_buf_iowait(bp); -} - -xfs_buf_t * -xfs_buf_read( - xfs_buftarg_t *target, - xfs_off_t ioff, - size_t isize, - xfs_buf_flags_t flags) -{ - xfs_buf_t *bp; - - flags |= XBF_READ; - - bp = xfs_buf_get(target, ioff, isize, flags); - if (bp) { - trace_xfs_buf_read(bp, flags, _RET_IP_); - - if (!XFS_BUF_ISDONE(bp)) { - XFS_STATS_INC(xb_get_read); - _xfs_buf_read(bp, flags); - } else if (flags & XBF_ASYNC) { - /* - * Read ahead call which is already satisfied, - * drop the buffer - */ - goto no_buffer; - } else { - /* We do not want read in the flags */ - bp->b_flags &= ~XBF_READ; - } - } - - return bp; - - no_buffer: - if (flags & (XBF_LOCK | XBF_TRYLOCK)) - xfs_buf_unlock(bp); - xfs_buf_rele(bp); - return NULL; -} - -/* - * If we are not low on memory then do the readahead in a deadlock - * safe manner. - */ -void -xfs_buf_readahead( - xfs_buftarg_t *target, - xfs_off_t ioff, - size_t isize) -{ - if (bdi_read_congested(target->bt_bdi)) - return; - - xfs_buf_read(target, ioff, isize, - XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK); -} - -/* - * Read an uncached buffer from disk. Allocates and returns a locked - * buffer containing the disk contents or nothing. 
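xfs_buf_associate_memory() below wraps caller-supplied memory that need not be page aligned. Its page arithmetic, modelled with invented values (4k pages assumed):

#include <stdio.h>

#define PAGE_SHIFT_L 12
#define PAGE_SIZE_L  (1UL << PAGE_SHIFT_L)
#define PAGE_MASK_L  (~(PAGE_SIZE_L - 1))
#define PAGE_ALIGN_L(x) (((x) + PAGE_SIZE_L - 1) & PAGE_MASK_L)

int main(void)
{
	unsigned long mem = 0x1000f00;	/* arbitrary, not page aligned */
	unsigned long len = 6000;

	unsigned long pageaddr = mem & PAGE_MASK_L;	/* page containing mem */
	unsigned long offset = mem - pageaddr;		/* 0xf00 = 3840 */
	unsigned long buflen = PAGE_ALIGN_L(len + offset);
	unsigned long page_count = buflen >> PAGE_SHIFT_L;

	/* 3840 + 6000 = 9840 bytes -> rounds up to 12288 -> 3 pages */
	printf("offset=%lu buflen=%lu pages=%lu\n", offset, buflen, page_count);
	return 0;
}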
- */
-struct xfs_buf *
-xfs_buf_read_uncached(
- struct xfs_mount *mp,
- struct xfs_buftarg *target,
- xfs_daddr_t daddr,
- size_t length,
- int flags)
-{
- xfs_buf_t *bp;
- int error;
-
- bp = xfs_buf_get_uncached(target, length, flags);
- if (!bp)
- return NULL;
-
- /* set up the buffer for a read IO */
- XFS_BUF_SET_ADDR(bp, daddr);
- XFS_BUF_READ(bp);
-
- xfsbdstrat(mp, bp);
- error = xfs_buf_iowait(bp);
- if (error || bp->b_error) {
- xfs_buf_relse(bp);
- return NULL;
- }
- return bp;
-}
-
-xfs_buf_t *
-xfs_buf_get_empty(
- size_t len,
- xfs_buftarg_t *target)
-{
- xfs_buf_t *bp;
-
- bp = xfs_buf_allocate(0);
- if (bp)
- _xfs_buf_initialize(bp, target, 0, len, 0);
- return bp;
-}
-
-/*
- * Return a buffer allocated as an empty buffer and associated with external
- * memory via xfs_buf_associate_memory() back to its empty state.
- */
-void
-xfs_buf_set_empty(
- struct xfs_buf *bp,
- size_t len)
-{
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
-
- bp->b_pages = NULL;
- bp->b_page_count = 0;
- bp->b_addr = NULL;
- bp->b_file_offset = 0;
- bp->b_buffer_length = bp->b_count_desired = len;
- bp->b_bn = XFS_BUF_DADDR_NULL;
- bp->b_flags &= ~XBF_MAPPED;
-}
-
-static inline struct page *
-mem_to_page(
- void *addr)
-{
- if ((!is_vmalloc_addr(addr))) {
- return virt_to_page(addr);
- } else {
- return vmalloc_to_page(addr);
- }
-}
-
-int
-xfs_buf_associate_memory(
- xfs_buf_t *bp,
- void *mem,
- size_t len)
-{
- int rval;
- int i = 0;
- unsigned long pageaddr;
- unsigned long offset;
- size_t buflen;
- int page_count;
-
- pageaddr = (unsigned long)mem & PAGE_MASK;
- offset = (unsigned long)mem - pageaddr;
- buflen = PAGE_ALIGN(len + offset);
- page_count = buflen >> PAGE_SHIFT;
-
- /* Free any previous set of page pointers */
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
-
- bp->b_pages = NULL;
- bp->b_addr = mem;
-
- rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
- if (rval)
- return rval;
-
- bp->b_offset = offset;
-
- for (i = 0; i < bp->b_page_count; i++) {
- bp->b_pages[i] = mem_to_page((void *)pageaddr);
- pageaddr += PAGE_SIZE;
- }
-
- bp->b_count_desired = len;
- bp->b_buffer_length = buflen;
- bp->b_flags |= XBF_MAPPED;
-
- return 0;
-}
-
-xfs_buf_t *
-xfs_buf_get_uncached(
- struct xfs_buftarg *target,
- size_t len,
- int flags)
-{
- unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
- int error, i;
- xfs_buf_t *bp;
-
- bp = xfs_buf_allocate(0);
- if (unlikely(bp == NULL))
- goto fail;
- _xfs_buf_initialize(bp, target, 0, len, 0);
-
- error = _xfs_buf_get_pages(bp, page_count, 0);
- if (error)
- goto fail_free_buf;
-
- for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
- if (!bp->b_pages[i])
- goto fail_free_mem;
- }
- bp->b_flags |= _XBF_PAGES;
-
- error = _xfs_buf_map_pages(bp, XBF_MAPPED);
- if (unlikely(error)) {
- xfs_warn(target->bt_mount,
- "%s: failed to map pages\n", __func__);
- goto fail_free_mem;
- }
-
- trace_xfs_buf_get_uncached(bp, _RET_IP_);
- return bp;
-
- fail_free_mem:
- while (--i >= 0)
- __free_page(bp->b_pages[i]);
- _xfs_buf_free_pages(bp);
- fail_free_buf:
- xfs_buf_deallocate(bp);
- fail:
- return NULL;
-}
-
-/*
- * Increment reference count on buffer, to hold the buffer concurrently
- * with another thread which may release (free) the buffer asynchronously.
- * Must hold the buffer already to call this function.
- */
-void
-xfs_buf_hold(
- xfs_buf_t *bp)
-{
- trace_xfs_buf_hold(bp, _RET_IP_);
- atomic_inc(&bp->b_hold);
-}
-
-/*
- * Releases a hold on the specified buffer. If the
- * hold count is 1, calls xfs_buf_free.
- */
-void
-xfs_buf_rele(
- xfs_buf_t *bp)
-{
- struct xfs_perag *pag = bp->b_pag;
-
- trace_xfs_buf_rele(bp, _RET_IP_);
-
- if (!pag) {
- ASSERT(list_empty(&bp->b_lru));
- ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
- if (atomic_dec_and_test(&bp->b_hold))
- xfs_buf_free(bp);
- return;
- }
-
- ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
-
- ASSERT(atomic_read(&bp->b_hold) > 0);
- if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
- if (!(bp->b_flags & XBF_STALE) &&
- atomic_read(&bp->b_lru_ref)) {
- xfs_buf_lru_add(bp);
- spin_unlock(&pag->pag_buf_lock);
- } else {
- xfs_buf_lru_del(bp);
- ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
- rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- xfs_buf_free(bp);
- }
- }
-}
-
-
-/*
- * Lock a buffer object, if it is not already locked.
- *
- * If we come across a stale, pinned, locked buffer, we know that we are
- * being asked to lock a buffer that has been reallocated. Because it is
- * pinned, we know that the log has not been pushed to disk and hence it
- * will still be locked. Rather than continuing to have trylock attempts
- * fail until someone else pushes the log, push it ourselves before
- * returning. This means that the xfsaild will not get stuck trying
- * to push on stale inode buffers.
- */
-int
-xfs_buf_trylock(
- struct xfs_buf *bp)
-{
- int locked;
-
- locked = down_trylock(&bp->b_sema) == 0;
- if (locked)
- XB_SET_OWNER(bp);
- else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
- xfs_log_force(bp->b_target->bt_mount, 0);
-
- trace_xfs_buf_trylock(bp, _RET_IP_);
- return locked;
-}
-
-/*
- * Lock a buffer object.
- *
- * If we come across a stale, pinned, locked buffer, we know that we
- * are being asked to lock a buffer that has been reallocated. Because
- * it is pinned, we know that the log has not been pushed to disk and
- * hence it will still be locked. Rather than sleeping until someone
- * else pushes the log, push it ourselves before trying to get the lock.
- */
-void
-xfs_buf_lock(
- struct xfs_buf *bp)
-{
- trace_xfs_buf_lock(bp, _RET_IP_);
-
- if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
- xfs_log_force(bp->b_target->bt_mount, 0);
- down(&bp->b_sema);
- XB_SET_OWNER(bp);
-
- trace_xfs_buf_lock_done(bp, _RET_IP_);
-}
-
-/*
- * Releases the lock on the buffer object.
- * If the buffer is marked delwri but is not queued, do so before we
- * unlock the buffer as we need to set flags correctly. We also need to
- * take a reference for the delwri queue because the unlocker is going to
- * drop theirs and they don't know we just queued it.
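/*
 * Illustrative aside (editor's sketch, not XFS code): xfs_buf_rele() above
 * relies on the classic atomic_dec_and_lock() idiom. The lock protecting the
 * lookup structure is taken only when the count may reach zero, so hot-path
 * releases never touch it. Generically (the struct and callbacks are
 * invented for the example):
 */
#include <linux/atomic.h>
#include <linux/spinlock.h>

struct cached_obj {
        atomic_t        refcount;
        spinlock_t      *index_lock;    /* lock of the containing index */
};

static void
cached_obj_put(struct cached_obj *obj, void (*erase)(struct cached_obj *),
               void (*free_obj)(struct cached_obj *))
{
        /* Fast path: drop a reference without taking the index lock. */
        if (!atomic_dec_and_lock(&obj->refcount, obj->index_lock))
                return;

        /* Count hit zero with the index lock held: safe to unlink. */
        erase(obj);
        spin_unlock(obj->index_lock);
        free_obj(obj);
}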
- */ -void -xfs_buf_unlock( - struct xfs_buf *bp) -{ - if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) { - atomic_inc(&bp->b_hold); - bp->b_flags |= XBF_ASYNC; - xfs_buf_delwri_queue(bp, 0); - } - - XB_CLEAR_OWNER(bp); - up(&bp->b_sema); - - trace_xfs_buf_unlock(bp, _RET_IP_); -} - -STATIC void -xfs_buf_wait_unpin( - xfs_buf_t *bp) -{ - DECLARE_WAITQUEUE (wait, current); - - if (atomic_read(&bp->b_pin_count) == 0) - return; - - add_wait_queue(&bp->b_waiters, &wait); - for (;;) { - set_current_state(TASK_UNINTERRUPTIBLE); - if (atomic_read(&bp->b_pin_count) == 0) - break; - io_schedule(); - } - remove_wait_queue(&bp->b_waiters, &wait); - set_current_state(TASK_RUNNING); -} - -/* - * Buffer Utility Routines - */ - -STATIC void -xfs_buf_iodone_work( - struct work_struct *work) -{ - xfs_buf_t *bp = - container_of(work, xfs_buf_t, b_iodone_work); - - if (bp->b_iodone) - (*(bp->b_iodone))(bp); - else if (bp->b_flags & XBF_ASYNC) - xfs_buf_relse(bp); -} - -void -xfs_buf_ioend( - xfs_buf_t *bp, - int schedule) -{ - trace_xfs_buf_iodone(bp, _RET_IP_); - - bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); - if (bp->b_error == 0) - bp->b_flags |= XBF_DONE; - - if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { - if (schedule) { - INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); - queue_work(xfslogd_workqueue, &bp->b_iodone_work); - } else { - xfs_buf_iodone_work(&bp->b_iodone_work); - } - } else { - complete(&bp->b_iowait); - } -} - -void -xfs_buf_ioerror( - xfs_buf_t *bp, - int error) -{ - ASSERT(error >= 0 && error <= 0xffff); - bp->b_error = (unsigned short)error; - trace_xfs_buf_ioerror(bp, error, _RET_IP_); -} - -int -xfs_bwrite( - struct xfs_mount *mp, - struct xfs_buf *bp) -{ - int error; - - bp->b_flags |= XBF_WRITE; - bp->b_flags &= ~(XBF_ASYNC | XBF_READ); - - xfs_buf_delwri_dequeue(bp); - xfs_bdstrat_cb(bp); - - error = xfs_buf_iowait(bp); - if (error) - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); - xfs_buf_relse(bp); - return error; -} - -void -xfs_bdwrite( - void *mp, - struct xfs_buf *bp) -{ - trace_xfs_buf_bdwrite(bp, _RET_IP_); - - bp->b_flags &= ~XBF_READ; - bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); - - xfs_buf_delwri_queue(bp, 1); -} - -/* - * Called when we want to stop a buffer from getting written or read. - * We attach the EIO error, muck with its flags, and call xfs_buf_ioend - * so that the proper iodone callbacks get called. - */ -STATIC int -xfs_bioerror( - xfs_buf_t *bp) -{ -#ifdef XFSERRORDEBUG - ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); -#endif - - /* - * No need to wait until the buffer is unpinned, we aren't flushing it. - */ - xfs_buf_ioerror(bp, EIO); - - /* - * We're calling xfs_buf_ioend, so delete XBF_DONE flag. - */ - XFS_BUF_UNREAD(bp); - XFS_BUF_UNDELAYWRITE(bp); - XFS_BUF_UNDONE(bp); - XFS_BUF_STALE(bp); - - xfs_buf_ioend(bp, 0); - - return EIO; -} - -/* - * Same as xfs_bioerror, except that we are releasing the buffer - * here ourselves, and avoiding the xfs_buf_ioend call. - * This is meant for userdata errors; metadata bufs come with - * iodone functions attached, so that we can track down errors. - */ -STATIC int -xfs_bioerror_relse( - struct xfs_buf *bp) -{ - int64_t fl = bp->b_flags; - /* - * No need to wait until the buffer is unpinned. - * We aren't flushing it. - * - * chunkhold expects B_DONE to be set, whether - * we actually finish the I/O or not. We don't want to - * change that interface. 
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- XFS_BUF_DONE(bp);
- XFS_BUF_STALE(bp);
- bp->b_iodone = NULL;
- if (!(fl & XBF_ASYNC)) {
- /*
- * Mark b_error and B_ERROR _both_.
- * Lots of chunkcache code assumes that.
- * There's no reason to mark error for
- * ASYNC buffers.
- */
- xfs_buf_ioerror(bp, EIO);
- XFS_BUF_FINISH_IOWAIT(bp);
- } else {
- xfs_buf_relse(bp);
- }
-
- return EIO;
-}
-
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
-xfs_bdstrat_cb(
- struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- /*
- * Metadata write that didn't get logged but
- * written delayed anyway. These aren't associated
- * with a transaction, and can be ignored.
- */
- if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
- return xfs_bioerror_relse(bp);
- else
- return xfs_bioerror(bp);
- }
-
- xfs_buf_iorequest(bp);
- return 0;
-}
-
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes through this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(mp)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
- return;
- }
-
- xfs_buf_iorequest(bp);
-}
-
-STATIC void
-_xfs_buf_ioend(
- xfs_buf_t *bp,
- int schedule)
-{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend(bp, schedule);
-}
-
-STATIC void
-xfs_buf_bio_end_io(
- struct bio *bio,
- int error)
-{
- xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
-
- xfs_buf_ioerror(bp, -error);
-
- if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
- invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
-
- _xfs_buf_ioend(bp, 1);
- bio_put(bio);
-}
-
-STATIC void
-_xfs_buf_ioapply(
- xfs_buf_t *bp)
-{
- int rw, map_i, total_nr_pages, nr_pages;
- struct bio *bio;
- int offset = bp->b_offset;
- int size = bp->b_count_desired;
- sector_t sector = bp->b_bn;
-
- total_nr_pages = bp->b_page_count;
- map_i = 0;
-
- if (bp->b_flags & XBF_WRITE) {
- if (bp->b_flags & XBF_SYNCIO)
- rw = WRITE_SYNC;
- else
- rw = WRITE;
- if (bp->b_flags & XBF_FUA)
- rw |= REQ_FUA;
- if (bp->b_flags & XBF_FLUSH)
- rw |= REQ_FLUSH;
- } else if (bp->b_flags & XBF_READ_AHEAD) {
- rw = READA;
- } else {
- rw = READ;
- }
-
- /* we only use the buffer cache for meta-data */
- rw |= REQ_META;
-
-next_chunk:
- atomic_inc(&bp->b_io_remaining);
- nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
- if (nr_pages > total_nr_pages)
- nr_pages = total_nr_pages;
-
- bio = bio_alloc(GFP_NOIO, nr_pages);
- bio->bi_bdev = bp->b_target->bt_bdev;
- bio->bi_sector = sector;
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
-
-
- for (; size && nr_pages; nr_pages--, map_i++) {
- int rbytes, nbytes = PAGE_SIZE - offset;
-
- if (nbytes > size)
- nbytes = size;
-
- rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
- if (rbytes < nbytes)
- break;
-
- offset = 0;
- sector += nbytes >> BBSHIFT;
- size -= nbytes;
- total_nr_pages--;
- }
-
- if (likely(bio->bi_size)) {
- if (xfs_buf_is_vmapped(bp)) {
- flush_kernel_vmap_range(bp->b_addr,
- xfs_buf_vmap_len(bp));
- }
- submit_bio(rw, bio);
- if (size)
- goto next_chunk;
- } else {
- xfs_buf_ioerror(bp, EIO);
- bio_put(bio);
- }
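/*
 * Illustrative aside (editor's sketch, not XFS code): the heart of
 * _xfs_buf_ioapply() above is a "fill a bio until bio_add_page() refuses,
 * submit, repeat" loop. A stripped-down version of that loop over a plain
 * page array (submit_done() and submit_pages() are invented names; error
 * handling and completion accounting are omitted):
 */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void
submit_done(struct bio *bio, int error)
{
        /* real code would record 'error' and signal a waiter here */
        bio_put(bio);
}

static void
submit_pages(struct block_device *bdev, sector_t sector,
             struct page **pages, int nr_pages, int rw)
{
        int i = 0;

        while (i < nr_pages) {
                struct bio *bio = bio_alloc(GFP_NOIO, nr_pages - i);

                bio->bi_bdev = bdev;
                bio->bi_sector = sector;
                bio->bi_end_io = submit_done;

                /* pack pages until this bio is full, then start over */
                while (i < nr_pages &&
                       bio_add_page(bio, pages[i], PAGE_SIZE, 0) == PAGE_SIZE) {
                        sector += PAGE_SIZE >> 9;
                        i++;
                }
                submit_bio(rw, bio);
        }
}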
-}
-
-int
-xfs_buf_iorequest(
- xfs_buf_t *bp)
-{
- trace_xfs_buf_iorequest(bp, _RET_IP_);
-
- if (bp->b_flags & XBF_DELWRI) {
- xfs_buf_delwri_queue(bp, 1);
- return 0;
- }
-
- if (bp->b_flags & XBF_WRITE) {
- xfs_buf_wait_unpin(bp);
- }
-
- xfs_buf_hold(bp);
-
- /* Set the count to 1 initially; this will stop an I/O
- * completion callout which happens before we have started
- * all the I/O from calling xfs_buf_ioend too early.
- */
- atomic_set(&bp->b_io_remaining, 1);
- _xfs_buf_ioapply(bp);
- _xfs_buf_ioend(bp, 0);
-
- xfs_buf_rele(bp);
- return 0;
-}
-
-/*
- * Waits for I/O to complete on the buffer supplied.
- * It returns immediately if no I/O is pending.
- * It returns the I/O error code, if any, or 0 if there was no error.
- */
-int
-xfs_buf_iowait(
- xfs_buf_t *bp)
-{
- trace_xfs_buf_iowait(bp, _RET_IP_);
-
- wait_for_completion(&bp->b_iowait);
-
- trace_xfs_buf_iowait_done(bp, _RET_IP_);
- return bp->b_error;
-}
-
-xfs_caddr_t
-xfs_buf_offset(
- xfs_buf_t *bp,
- size_t offset)
-{
- struct page *page;
-
- if (bp->b_flags & XBF_MAPPED)
- return bp->b_addr + offset;
-
- offset += bp->b_offset;
- page = bp->b_pages[offset >> PAGE_SHIFT];
- return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
-}
-
-/*
- * Move data into or out of a buffer.
- */
-void
-xfs_buf_iomove(
- xfs_buf_t *bp, /* buffer to process */
- size_t boff, /* starting buffer offset */
- size_t bsize, /* length to copy */
- void *data, /* data address */
- xfs_buf_rw_t mode) /* read/write/zero flag */
-{
- size_t bend, cpoff, csize;
- struct page *page;
-
- bend = boff + bsize;
- while (boff < bend) {
- page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
- cpoff = xfs_buf_poff(boff + bp->b_offset);
- csize = min_t(size_t,
- PAGE_SIZE-cpoff, bp->b_count_desired-boff);
-
- ASSERT(((csize + cpoff) <= PAGE_SIZE));
-
- switch (mode) {
- case XBRW_ZERO:
- memset(page_address(page) + cpoff, 0, csize);
- break;
- case XBRW_READ:
- memcpy(data, page_address(page) + cpoff, csize);
- break;
- case XBRW_WRITE:
- memcpy(page_address(page) + cpoff, data, csize);
- }
-
- boff += csize;
- data += csize;
- }
-}
-
-/*
- * Handling of buffer targets (buftargs).
- */
-
-/*
- * Wait for any bufs with callbacks that have been submitted but have not yet
- * returned. These buffers will have an elevated hold count, so wait on those
- * while freeing all the buffers only held by the LRU.
- */
-void
-xfs_wait_buftarg(
- struct xfs_buftarg *btp)
-{
- struct xfs_buf *bp;
-
-restart:
- spin_lock(&btp->bt_lru_lock);
- while (!list_empty(&btp->bt_lru)) {
- bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
- if (atomic_read(&bp->b_hold) > 1) {
- spin_unlock(&btp->bt_lru_lock);
- delay(100);
- goto restart;
- }
- /*
- * clear the LRU reference count so the buffer doesn't get
- * ignored in xfs_buf_rele().
- */
- atomic_set(&bp->b_lru_ref, 0);
- spin_unlock(&btp->bt_lru_lock);
- xfs_buf_rele(bp);
- spin_lock(&btp->bt_lru_lock);
- }
- spin_unlock(&btp->bt_lru_lock);
-}
-
-int
-xfs_buftarg_shrink(
- struct shrinker *shrink,
- struct shrink_control *sc)
-{
- struct xfs_buftarg *btp = container_of(shrink,
- struct xfs_buftarg, bt_shrinker);
- struct xfs_buf *bp;
- int nr_to_scan = sc->nr_to_scan;
- LIST_HEAD(dispose);
-
- if (!nr_to_scan)
- return btp->bt_lru_nr;
-
- spin_lock(&btp->bt_lru_lock);
- while (!list_empty(&btp->bt_lru)) {
- if (nr_to_scan-- <= 0)
- break;
-
- bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
-
- /*
- * Decrement the b_lru_ref count unless the value is already
- * zero.
If the value is already zero, we need to reclaim the
- * buffer, otherwise it gets another trip through the LRU.
- */
- if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
- list_move_tail(&bp->b_lru, &btp->bt_lru);
- continue;
- }
-
- /*
- * remove the buffer from the LRU now to avoid needing another
- * lock round trip inside xfs_buf_rele().
- */
- list_move(&bp->b_lru, &dispose);
- btp->bt_lru_nr--;
- }
- spin_unlock(&btp->bt_lru_lock);
-
- while (!list_empty(&dispose)) {
- bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
- list_del_init(&bp->b_lru);
- xfs_buf_rele(bp);
- }
-
- return btp->bt_lru_nr;
-}
-
-void
-xfs_free_buftarg(
- struct xfs_mount *mp,
- struct xfs_buftarg *btp)
-{
- unregister_shrinker(&btp->bt_shrinker);
-
- xfs_flush_buftarg(btp, 1);
- if (mp->m_flags & XFS_MOUNT_BARRIER)
- xfs_blkdev_issue_flush(btp);
-
- kthread_stop(btp->bt_task);
- kmem_free(btp);
-}
-
-STATIC int
-xfs_setsize_buftarg_flags(
- xfs_buftarg_t *btp,
- unsigned int blocksize,
- unsigned int sectorsize,
- int verbose)
-{
- btp->bt_bsize = blocksize;
- btp->bt_sshift = ffs(sectorsize) - 1;
- btp->bt_smask = sectorsize - 1;
-
- if (set_blocksize(btp->bt_bdev, sectorsize)) {
- xfs_warn(btp->bt_mount,
- "Cannot set_blocksize to %u on device %s\n",
- sectorsize, xfs_buf_target_name(btp));
- return EINVAL;
- }
-
- return 0;
-}
-
-/*
- * When allocating the initial buffer target we have not yet
- * read in the superblock, so we don't know what size sectors
- * are being used at this early stage. Play safe.
- */
-STATIC int
-xfs_setsize_buftarg_early(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
-{
- return xfs_setsize_buftarg_flags(btp,
- PAGE_SIZE, bdev_logical_block_size(bdev), 0);
-}
-
-int
-xfs_setsize_buftarg(
- xfs_buftarg_t *btp,
- unsigned int blocksize,
- unsigned int sectorsize)
-{
- return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
-}
-
-STATIC int
-xfs_alloc_delwrite_queue(
- xfs_buftarg_t *btp,
- const char *fsname)
-{
- INIT_LIST_HEAD(&btp->bt_delwrite_queue);
- spin_lock_init(&btp->bt_delwrite_lock);
- btp->bt_flags = 0;
- btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
- if (IS_ERR(btp->bt_task))
- return PTR_ERR(btp->bt_task);
- return 0;
-}
-
-xfs_buftarg_t *
-xfs_alloc_buftarg(
- struct xfs_mount *mp,
- struct block_device *bdev,
- int external,
- const char *fsname)
-{
- xfs_buftarg_t *btp;
-
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
-
- btp->bt_mount = mp;
- btp->bt_dev = bdev->bd_dev;
- btp->bt_bdev = bdev;
- btp->bt_bdi = blk_get_backing_dev_info(bdev);
- if (!btp->bt_bdi)
- goto error;
-
- INIT_LIST_HEAD(&btp->bt_lru);
- spin_lock_init(&btp->bt_lru_lock);
- if (xfs_setsize_buftarg_early(btp, bdev))
- goto error;
- if (xfs_alloc_delwrite_queue(btp, fsname))
- goto error;
- btp->bt_shrinker.shrink = xfs_buftarg_shrink;
- btp->bt_shrinker.seeks = DEFAULT_SEEKS;
- register_shrinker(&btp->bt_shrinker);
- return btp;
-
-error:
- kmem_free(btp);
- return NULL;
-}
-
-
-/*
- * Delayed write buffer handling
- */
-STATIC void
-xfs_buf_delwri_queue(
- xfs_buf_t *bp,
- int unlock)
-{
- struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
- spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
-
- trace_xfs_buf_delwri_queue(bp, _RET_IP_);
-
- ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
-
- spin_lock(dwlk);
- /* If already in the queue, dequeue and place at tail */
- if (!list_empty(&bp->b_list)) {
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
- if (unlock)
- atomic_dec(&bp->b_hold);
- list_del(&bp->b_list);
- }
-
- if
(list_empty(dwq)) {
- /* start xfsbufd as it is about to have something to do */
- wake_up_process(bp->b_target->bt_task);
- }
-
- bp->b_flags |= _XBF_DELWRI_Q;
- list_add_tail(&bp->b_list, dwq);
- bp->b_queuetime = jiffies;
- spin_unlock(dwlk);
-
- if (unlock)
- xfs_buf_unlock(bp);
-}
-
-void
-xfs_buf_delwri_dequeue(
- xfs_buf_t *bp)
-{
- spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
- int dequeued = 0;
-
- spin_lock(dwlk);
- if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
- list_del_init(&bp->b_list);
- dequeued = 1;
- }
- bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
- spin_unlock(dwlk);
-
- if (dequeued)
- xfs_buf_rele(bp);
-
- trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
-}
-
-/*
- * If a delwri buffer needs to be pushed before it has aged out, then promote
- * it to the head of the delwri queue so that it will be flushed on the next
- * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
- * than the age currently needed to flush the buffer. Hence the next time the
- * xfsbufd sees it is guaranteed to be considered old enough to flush.
- */
-void
-xfs_buf_delwri_promote(
- struct xfs_buf *bp)
-{
- struct xfs_buftarg *btp = bp->b_target;
- long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
-
- ASSERT(bp->b_flags & XBF_DELWRI);
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
-
- /*
- * Check the buffer age before locking the delayed write queue as we
- * don't need to promote buffers that are already past the flush age.
- */
- if (bp->b_queuetime < jiffies - age)
- return;
- bp->b_queuetime = jiffies - age;
- spin_lock(&btp->bt_delwrite_lock);
- list_move(&bp->b_list, &btp->bt_delwrite_queue);
- spin_unlock(&btp->bt_delwrite_lock);
-}
-
-STATIC void
-xfs_buf_runall_queues(
- struct workqueue_struct *queue)
-{
- flush_workqueue(queue);
-}
-
-/*
- * Move as many buffers as specified to the supplied list
- * indicating if we skipped any buffers to prevent deadlocks.
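/*
 * Illustrative aside (editor's sketch, not XFS code): the promotion trick in
 * xfs_buf_delwri_promote() above deserves a second look - no separate
 * "urgent" list is needed, the queuetime is simply back-dated past the flush
 * age so the flusher's normal test fires at once:
 */
#include <linux/jiffies.h>

struct aged_item {
        unsigned long   queuetime;      /* jiffies when queued */
};

/* the flusher skips items younger than 'age' jiffies */
static int
item_is_flushable(struct aged_item *it, unsigned long age)
{
        return !time_before(jiffies, it->queuetime + age);
}

/* promote: pretend the item was queued more than 'age' jiffies ago */
static void
item_promote(struct aged_item *it, unsigned long age)
{
        it->queuetime = jiffies - age - 1;
}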
- */ -STATIC int -xfs_buf_delwri_split( - xfs_buftarg_t *target, - struct list_head *list, - unsigned long age) -{ - xfs_buf_t *bp, *n; - struct list_head *dwq = &target->bt_delwrite_queue; - spinlock_t *dwlk = &target->bt_delwrite_lock; - int skipped = 0; - int force; - - force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags); - INIT_LIST_HEAD(list); - spin_lock(dwlk); - list_for_each_entry_safe(bp, n, dwq, b_list) { - ASSERT(bp->b_flags & XBF_DELWRI); - - if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) { - if (!force && - time_before(jiffies, bp->b_queuetime + age)) { - xfs_buf_unlock(bp); - break; - } - - bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q); - bp->b_flags |= XBF_WRITE; - list_move_tail(&bp->b_list, list); - trace_xfs_buf_delwri_split(bp, _RET_IP_); - } else - skipped++; - } - spin_unlock(dwlk); - - return skipped; - -} - -/* - * Compare function is more complex than it needs to be because - * the return value is only 32 bits and we are doing comparisons - * on 64 bit values - */ -static int -xfs_buf_cmp( - void *priv, - struct list_head *a, - struct list_head *b) -{ - struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); - struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); - xfs_daddr_t diff; - - diff = ap->b_bn - bp->b_bn; - if (diff < 0) - return -1; - if (diff > 0) - return 1; - return 0; -} - -STATIC int -xfsbufd( - void *data) -{ - xfs_buftarg_t *target = (xfs_buftarg_t *)data; - - current->flags |= PF_MEMALLOC; - - set_freezable(); - - do { - long age = xfs_buf_age_centisecs * msecs_to_jiffies(10); - long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10); - struct list_head tmp; - struct blk_plug plug; - - if (unlikely(freezing(current))) { - set_bit(XBT_FORCE_SLEEP, &target->bt_flags); - refrigerator(); - } else { - clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); - } - - /* sleep for a long time if there is nothing to do. */ - if (list_empty(&target->bt_delwrite_queue)) - tout = MAX_SCHEDULE_TIMEOUT; - schedule_timeout_interruptible(tout); - - xfs_buf_delwri_split(target, &tmp, age); - list_sort(NULL, &tmp, xfs_buf_cmp); - - blk_start_plug(&plug); - while (!list_empty(&tmp)) { - struct xfs_buf *bp; - bp = list_first_entry(&tmp, struct xfs_buf, b_list); - list_del_init(&bp->b_list); - xfs_bdstrat_cb(bp); - } - blk_finish_plug(&plug); - } while (!kthread_should_stop()); - - return 0; -} - -/* - * Go through all incore buffers, and release buffers if they belong to - * the given device. This is used in filesystem error handling to - * preserve the consistency of its metadata. - */ -int -xfs_flush_buftarg( - xfs_buftarg_t *target, - int wait) -{ - xfs_buf_t *bp; - int pincount = 0; - LIST_HEAD(tmp_list); - LIST_HEAD(wait_list); - struct blk_plug plug; - - xfs_buf_runall_queues(xfsconvertd_workqueue); - xfs_buf_runall_queues(xfsdatad_workqueue); - xfs_buf_runall_queues(xfslogd_workqueue); - - set_bit(XBT_FORCE_FLUSH, &target->bt_flags); - pincount = xfs_buf_delwri_split(target, &tmp_list, 0); - - /* - * Dropped the delayed write list lock, now walk the temporary list. - * All I/O is issued async and then if we need to wait for completion - * we do that after issuing all the IO. 
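/*
 * Illustrative aside (editor's sketch, not XFS code): xfsbufd() and
 * xfs_flush_buftarg() above share one submission discipline - sort the
 * private list into ascending disk order, issue everything inside one block
 * plug so the elevator sees a single batch, and wait only after all I/O has
 * been started. The skeleton, with invented callback names:
 */
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

static void
submit_sorted(struct list_head *batch,
              int (*cmp)(void *, struct list_head *, struct list_head *),
              void (*submit_one)(struct list_head *))
{
        struct blk_plug plug;

        list_sort(NULL, batch, cmp);            /* ascending disk order */

        blk_start_plug(&plug);
        while (!list_empty(batch)) {
                struct list_head *item = batch->next;

                list_del_init(item);
                submit_one(item);               /* async issue, no waiting */
        }
        blk_finish_plug(&plug);
}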
- */ - list_sort(NULL, &tmp_list, xfs_buf_cmp); - - blk_start_plug(&plug); - while (!list_empty(&tmp_list)) { - bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); - ASSERT(target == bp->b_target); - list_del_init(&bp->b_list); - if (wait) { - bp->b_flags &= ~XBF_ASYNC; - list_add(&bp->b_list, &wait_list); - } - xfs_bdstrat_cb(bp); - } - blk_finish_plug(&plug); - - if (wait) { - /* Wait for IO to complete. */ - while (!list_empty(&wait_list)) { - bp = list_first_entry(&wait_list, struct xfs_buf, b_list); - - list_del_init(&bp->b_list); - xfs_buf_iowait(bp); - xfs_buf_relse(bp); - } - } - - return pincount; -} - -int __init -xfs_buf_init(void) -{ - xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", - KM_ZONE_HWALIGN, NULL); - if (!xfs_buf_zone) - goto out; - - xfslogd_workqueue = alloc_workqueue("xfslogd", - WQ_MEM_RECLAIM | WQ_HIGHPRI, 1); - if (!xfslogd_workqueue) - goto out_free_buf_zone; - - xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1); - if (!xfsdatad_workqueue) - goto out_destroy_xfslogd_workqueue; - - xfsconvertd_workqueue = alloc_workqueue("xfsconvertd", - WQ_MEM_RECLAIM, 1); - if (!xfsconvertd_workqueue) - goto out_destroy_xfsdatad_workqueue; - - return 0; - - out_destroy_xfsdatad_workqueue: - destroy_workqueue(xfsdatad_workqueue); - out_destroy_xfslogd_workqueue: - destroy_workqueue(xfslogd_workqueue); - out_free_buf_zone: - kmem_zone_destroy(xfs_buf_zone); - out: - return -ENOMEM; -} - -void -xfs_buf_terminate(void) -{ - destroy_workqueue(xfsconvertd_workqueue); - destroy_workqueue(xfsdatad_workqueue); - destroy_workqueue(xfslogd_workqueue); - kmem_zone_destroy(xfs_buf_zone); -} - -#ifdef CONFIG_KDB_MODULES -struct list_head * -xfs_get_buftarg_list(void) -{ - return &xfs_buftarg_list; -} -#endif diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h deleted file mode 100644 index 620972b..0000000 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_BUF_H__ -#define __XFS_BUF_H__ - -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Base types - */ - -#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) - -#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) -#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) -#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) - -typedef enum { - XBRW_READ = 1, /* transfer into target memory */ - XBRW_WRITE = 2, /* transfer from target memory */ - XBRW_ZERO = 3, /* Zero target memory */ -} xfs_buf_rw_t; - -#define XBF_READ (1 << 0) /* buffer intended for reading from device */ -#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ -#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */ -#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */ -#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ -#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ -#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ -#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ - -/* I/O hints for the BIO layer */ -#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ -#define XBF_FUA (1 << 11)/* force cache write through mode */ -#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */ - -/* flags used only as arguments to access routines */ -#define XBF_LOCK (1 << 15)/* lock requested */ -#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ -#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */ - -/* flags used only internally */ -#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ -#define _XBF_KMEM (1 << 21)/* backed by heap memory */ -#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */ - -typedef unsigned int xfs_buf_flags_t; - -#define XFS_BUF_FLAGS \ - { XBF_READ, "READ" }, \ - { XBF_WRITE, "WRITE" }, \ - { XBF_READ_AHEAD, "READ_AHEAD" }, \ - { XBF_MAPPED, "MAPPED" }, \ - { XBF_ASYNC, "ASYNC" }, \ - { XBF_DONE, "DONE" }, \ - { XBF_DELWRI, "DELWRI" }, \ - { XBF_STALE, "STALE" }, \ - { XBF_SYNCIO, "SYNCIO" }, \ - { XBF_FUA, "FUA" }, \ - { XBF_FLUSH, "FLUSH" }, \ - { XBF_LOCK, "LOCK" }, /* should never be set */\ - { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ - { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ - { _XBF_PAGES, "PAGES" }, \ - { _XBF_KMEM, "KMEM" }, \ - { _XBF_DELWRI_Q, "DELWRI_Q" } - -typedef enum { - XBT_FORCE_SLEEP = 0, - XBT_FORCE_FLUSH = 1, -} xfs_buftarg_flags_t; - -typedef struct xfs_buftarg { - dev_t bt_dev; - struct block_device *bt_bdev; - struct backing_dev_info *bt_bdi; - struct xfs_mount *bt_mount; - unsigned int bt_bsize; - unsigned int bt_sshift; - size_t bt_smask; - - /* per device delwri queue */ - struct task_struct *bt_task; - struct list_head bt_delwrite_queue; - spinlock_t bt_delwrite_lock; - unsigned long bt_flags; - - /* LRU control structures */ - struct shrinker bt_shrinker; - struct list_head bt_lru; - spinlock_t bt_lru_lock; - unsigned int bt_lru_nr; -} xfs_buftarg_t; - -struct xfs_buf; -typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); - -#define XB_PAGES 2 - -typedef struct xfs_buf { - /* - * first cacheline holds all the fields needed for an uncontended cache - * hit to be fully processed. 
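/*
 * Illustrative aside (editor's sketch, not XFS code): a layout comment like
 * the one above can be enforced at build time. Assuming a 64-byte cacheline,
 * a compile-time guard that the lookup-path fields of a structure stay
 * within the first line might look like this (struct and field names are
 * invented):
 */
#include <linux/kernel.h>
#include <linux/stddef.h>

struct hot_cold_example {
        unsigned long   key;            /* lookup key: hot */
        int             refcount;       /* hot */
        unsigned int    flags;          /* hot */
        /* everything below is only touched on slow paths */
        char            name[128];      /* cold */
};

static inline void
hot_cold_layout_check(void)
{
        /* fails to compile if a hot field slips past the first line */
        BUILD_BUG_ON(offsetof(struct hot_cold_example, name) > 64);
}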
The semaphore straddles the cacheline - * boundary, but the counter and lock sits on the first cacheline, - * which is the only bit that is touched if we hit the semaphore - * fast-path on locking. - */ - struct rb_node b_rbnode; /* rbtree node */ - xfs_off_t b_file_offset; /* offset in file */ - size_t b_buffer_length;/* size of buffer in bytes */ - atomic_t b_hold; /* reference count */ - atomic_t b_lru_ref; /* lru reclaim ref count */ - xfs_buf_flags_t b_flags; /* status flags */ - struct semaphore b_sema; /* semaphore for lockables */ - - struct list_head b_lru; /* lru list */ - wait_queue_head_t b_waiters; /* unpin waiters */ - struct list_head b_list; - struct xfs_perag *b_pag; /* contains rbtree root */ - xfs_buftarg_t *b_target; /* buffer target (device) */ - xfs_daddr_t b_bn; /* block number for I/O */ - size_t b_count_desired;/* desired transfer size */ - void *b_addr; /* virtual address of buffer */ - struct work_struct b_iodone_work; - xfs_buf_iodone_t b_iodone; /* I/O completion function */ - struct completion b_iowait; /* queue for I/O waiters */ - void *b_fspriv; - struct xfs_trans *b_transp; - struct page **b_pages; /* array of page pointers */ - struct page *b_page_array[XB_PAGES]; /* inline pages */ - unsigned long b_queuetime; /* time buffer was queued */ - atomic_t b_pin_count; /* pin count */ - atomic_t b_io_remaining; /* #outstanding I/O requests */ - unsigned int b_page_count; /* size of page array */ - unsigned int b_offset; /* page offset in first page */ - unsigned short b_error; /* error code on I/O */ -#ifdef XFS_BUF_LOCK_TRACKING - int b_last_holder; -#endif -} xfs_buf_t; - - -/* Finding and Reading Buffers */ -extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t, - xfs_buf_flags_t, xfs_buf_t *); -#define xfs_incore(buftarg,blkno,len,lockit) \ - _xfs_buf_find(buftarg, blkno ,len, lockit, NULL) - -extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t, - xfs_buf_flags_t); -extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t, - xfs_buf_flags_t); - -extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); -extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len); -extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int); -extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); -extern void xfs_buf_hold(xfs_buf_t *); -extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t); -struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp, - struct xfs_buftarg *target, - xfs_daddr_t daddr, size_t length, int flags); - -/* Releasing Buffers */ -extern void xfs_buf_free(xfs_buf_t *); -extern void xfs_buf_rele(xfs_buf_t *); - -/* Locking and Unlocking Buffers */ -extern int xfs_buf_trylock(xfs_buf_t *); -extern void xfs_buf_lock(xfs_buf_t *); -extern void xfs_buf_unlock(xfs_buf_t *); -#define xfs_buf_islocked(bp) \ - ((bp)->b_sema.count <= 0) - -/* Buffer Read and Write Routines */ -extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp); -extern void xfs_bdwrite(void *mp, xfs_buf_t *bp); - -extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); -extern int xfs_bdstrat_cb(struct xfs_buf *); - -extern void xfs_buf_ioend(xfs_buf_t *, int); -extern void xfs_buf_ioerror(xfs_buf_t *, int); -extern int xfs_buf_iorequest(xfs_buf_t *); -extern int xfs_buf_iowait(xfs_buf_t *); -extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, - xfs_buf_rw_t); -#define xfs_buf_zero(bp, off, len) \ - xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) - -static inline int 
xfs_buf_geterror(xfs_buf_t *bp) -{ - return bp ? bp->b_error : ENOMEM; -} - -/* Buffer Utility Routines */ -extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); - -/* Delayed Write Buffer Routines */ -extern void xfs_buf_delwri_dequeue(xfs_buf_t *); -extern void xfs_buf_delwri_promote(xfs_buf_t *); - -/* Buffer Daemon Setup Routines */ -extern int xfs_buf_init(void); -extern void xfs_buf_terminate(void); - -static inline const char * -xfs_buf_target_name(struct xfs_buftarg *target) -{ - static char __b[BDEVNAME_SIZE]; - - return bdevname(target->bt_bdev, __b); -} - - -#define XFS_BUF_ZEROFLAGS(bp) \ - ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ - XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) - -void xfs_buf_stale(struct xfs_buf *bp); -#define XFS_BUF_STALE(bp) xfs_buf_stale(bp); -#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) -#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) -#define XFS_BUF_SUPER_STALE(bp) do { \ - XFS_BUF_STALE(bp); \ - xfs_buf_delwri_dequeue(bp); \ - XFS_BUF_DONE(bp); \ - } while (0) - -#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) -#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) -#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) - -#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) -#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) -#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) - -#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) -#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) -#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) - -#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) -#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) -#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) - -#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE) -#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) -#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) - -#define XFS_BUF_ADDR(bp) ((bp)->b_bn) -#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) -#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset) -#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off)) -#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired) -#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt)) -#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length) -#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt)) - -static inline void -xfs_buf_set_ref( - struct xfs_buf *bp, - int lru_ref) -{ - atomic_set(&bp->b_lru_ref, lru_ref); -} -#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref) -#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) - -static inline int xfs_buf_ispinned(struct xfs_buf *bp) -{ - return atomic_read(&bp->b_pin_count); -} - -#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); - -static inline void xfs_buf_relse(xfs_buf_t *bp) -{ - xfs_buf_unlock(bp); - xfs_buf_rele(bp); -} - -/* - * Handling of buftargs. 
- */ -extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *, - struct block_device *, int, const char *); -extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); -extern void xfs_wait_buftarg(xfs_buftarg_t *); -extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); -extern int xfs_flush_buftarg(xfs_buftarg_t *, int); - -#ifdef CONFIG_KDB_MODULES -extern struct list_head *xfs_get_buftarg_list(void); -#endif - -#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev) -#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev) - -#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1) -#define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1) - -#endif /* __XFS_BUF_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c deleted file mode 100644 index 244e797..0000000 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright (C) 2010 Red Hat, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_sb.h" -#include "xfs_inum.h" -#include "xfs_log.h" -#include "xfs_ag.h" -#include "xfs_mount.h" -#include "xfs_quota.h" -#include "xfs_trans.h" -#include "xfs_alloc_btree.h" -#include "xfs_bmap_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_btree.h" -#include "xfs_inode.h" -#include "xfs_alloc.h" -#include "xfs_error.h" -#include "xfs_discard.h" -#include "xfs_trace.h" - -STATIC int -xfs_trim_extents( - struct xfs_mount *mp, - xfs_agnumber_t agno, - xfs_fsblock_t start, - xfs_fsblock_t len, - xfs_fsblock_t minlen, - __uint64_t *blocks_trimmed) -{ - struct block_device *bdev = mp->m_ddev_targp->bt_bdev; - struct xfs_btree_cur *cur; - struct xfs_buf *agbp; - struct xfs_perag *pag; - int error; - int i; - - pag = xfs_perag_get(mp, agno); - - error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); - if (error || !agbp) - goto out_put_perag; - - cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); - - /* - * Force out the log. This means any transactions that might have freed - * space before we took the AGF buffer lock are now on disk, and the - * volatile disk cache is flushed. - */ - xfs_log_force(mp, XFS_LOG_SYNC); - - /* - * Look up the longest btree in the AGF and start with it. - */ - error = xfs_alloc_lookup_le(cur, 0, - XFS_BUF_TO_AGF(agbp)->agf_longest, &i); - if (error) - goto out_del_cursor; - - /* - * Loop until we are done with all extents that are large - * enough to be worth discarding. - */ - while (i) { - xfs_agblock_t fbno; - xfs_extlen_t flen; - - error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); - if (error) - goto out_del_cursor; - XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); - ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest); - - /* - * Too small? Give up. 
- */ - if (flen < minlen) { - trace_xfs_discard_toosmall(mp, agno, fbno, flen); - goto out_del_cursor; - } - - /* - * If the extent is entirely outside of the range we are - * supposed to discard skip it. Do not bother to trim - * down partially overlapping ranges for now. - */ - if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start || - XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) { - trace_xfs_discard_exclude(mp, agno, fbno, flen); - goto next_extent; - } - - /* - * If any blocks in the range are still busy, skip the - * discard and try again the next time. - */ - if (xfs_alloc_busy_search(mp, agno, fbno, flen)) { - trace_xfs_discard_busy(mp, agno, fbno, flen); - goto next_extent; - } - - trace_xfs_discard_extent(mp, agno, fbno, flen); - error = -blkdev_issue_discard(bdev, - XFS_AGB_TO_DADDR(mp, agno, fbno), - XFS_FSB_TO_BB(mp, flen), - GFP_NOFS, 0); - if (error) - goto out_del_cursor; - *blocks_trimmed += flen; - -next_extent: - error = xfs_btree_decrement(cur, 0, &i); - if (error) - goto out_del_cursor; - } - -out_del_cursor: - xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); - xfs_buf_relse(agbp); -out_put_perag: - xfs_perag_put(pag); - return error; -} - -int -xfs_ioc_trim( - struct xfs_mount *mp, - struct fstrim_range __user *urange) -{ - struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; - unsigned int granularity = q->limits.discard_granularity; - struct fstrim_range range; - xfs_fsblock_t start, len, minlen; - xfs_agnumber_t start_agno, end_agno, agno; - __uint64_t blocks_trimmed = 0; - int error, last_error = 0; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - if (!blk_queue_discard(q)) - return -XFS_ERROR(EOPNOTSUPP); - if (copy_from_user(&range, urange, sizeof(range))) - return -XFS_ERROR(EFAULT); - - /* - * Truncating down the len isn't actually quite correct, but using - * XFS_B_TO_FSB would mean we trivially get overflows for values - * of ULLONG_MAX or slightly lower. And ULLONG_MAX is the default - * used by the fstrim application. In the end it really doesn't - * matter as trimming blocks is an advisory interface. 
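/*
 * Illustrative aside (editor's sketch, not XFS code): the overflow the
 * comment above worries about is easy to see - a round-up conversion adds
 * (blocksize - 1) before shifting, which wraps for byte counts near
 * ULLONG_MAX (the fstrim default), while plain truncation cannot wrap:
 */
static unsigned long long
bytes_to_blocks_trunc(unsigned long long bytes, int shift)
{
        return bytes >> shift;          /* floor: never overflows */
}

static unsigned long long
bytes_to_blocks_roundup(unsigned long long bytes, int shift)
{
        /* the addition wraps, yielding 0 blocks for bytes == ULLONG_MAX */
        return (bytes + (1ULL << shift) - 1) >> shift;
}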
- */ - start = XFS_B_TO_FSBT(mp, range.start); - len = XFS_B_TO_FSBT(mp, range.len); - minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen)); - - start_agno = XFS_FSB_TO_AGNO(mp, start); - if (start_agno >= mp->m_sb.sb_agcount) - return -XFS_ERROR(EINVAL); - - end_agno = XFS_FSB_TO_AGNO(mp, start + len); - if (end_agno >= mp->m_sb.sb_agcount) - end_agno = mp->m_sb.sb_agcount - 1; - - for (agno = start_agno; agno <= end_agno; agno++) { - error = -xfs_trim_extents(mp, agno, start, len, minlen, - &blocks_trimmed); - if (error) - last_error = error; - } - - if (last_error) - return last_error; - - range.len = XFS_FSB_TO_B(mp, blocks_trimmed); - if (copy_to_user(urange, &range, sizeof(range))) - return -XFS_ERROR(EFAULT); - return 0; -} - -int -xfs_discard_extents( - struct xfs_mount *mp, - struct list_head *list) -{ - struct xfs_busy_extent *busyp; - int error = 0; - - list_for_each_entry(busyp, list, list) { - trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, - busyp->length); - - error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, - XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), - XFS_FSB_TO_BB(mp, busyp->length), - GFP_NOFS, 0); - if (error && error != EOPNOTSUPP) { - xfs_info(mp, - "discard failed for extent [0x%llu,%u], error %d", - (unsigned long long)busyp->bno, - busyp->length, - error); - return error; - } - } - - return 0; -} diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h deleted file mode 100644 index 344879a..0000000 --- a/fs/xfs/linux-2.6/xfs_discard.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef XFS_DISCARD_H -#define XFS_DISCARD_H 1 - -struct fstrim_range; -struct list_head; - -extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); -extern int xfs_discard_extents(struct xfs_mount *, struct list_head *); - -#endif /* XFS_DISCARD_H */ diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c deleted file mode 100644 index 75e5d32..0000000 --- a/fs/xfs/linux-2.6/xfs_export.c +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright (c) 2004-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_types.h" -#include "xfs_inum.h" -#include "xfs_log.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_dir2.h" -#include "xfs_mount.h" -#include "xfs_export.h" -#include "xfs_vnodeops.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_inode_item.h" -#include "xfs_trace.h" - -/* - * Note that we only accept fileids which are long enough rather than allow - * the parent generation number to default to zero. XFS considers zero a - * valid generation number not an invalid/wildcard value. 
- */
-static int xfs_fileid_length(int fileid_type)
-{
- switch (fileid_type) {
- case FILEID_INO32_GEN:
- return 2;
- case FILEID_INO32_GEN_PARENT:
- return 4;
- case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
- return 3;
- case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
- return 6;
- }
- return 255; /* invalid */
-}
-
-STATIC int
-xfs_fs_encode_fh(
- struct dentry *dentry,
- __u32 *fh,
- int *max_len,
- int connectable)
-{
- struct fid *fid = (struct fid *)fh;
- struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fh;
- struct inode *inode = dentry->d_inode;
- int fileid_type;
- int len;
-
- /* Directories don't need their parent encoded, they have ".." */
- if (S_ISDIR(inode->i_mode) || !connectable)
- fileid_type = FILEID_INO32_GEN;
- else
- fileid_type = FILEID_INO32_GEN_PARENT;
-
- /*
- * If the filesystem may contain 64bit inode numbers, we need
- * to use larger file handles that can represent them.
- *
- * While we only allocate inodes that do not fit into 32 bits,
- * any large enough filesystem may contain them, thus the slightly
- * confusing looking conditional below.
- */
- if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
- (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
- fileid_type |= XFS_FILEID_TYPE_64FLAG;
-
- /*
- * Only encode if there is enough space given. In practice
- * this means we can't export a filesystem with 64bit inodes
- * over NFSv2 with the subtree_check export option; the other
- * seven combinations work. The real answer is "don't use v2".
- */
- len = xfs_fileid_length(fileid_type);
- if (*max_len < len) {
- *max_len = len;
- return 255;
- }
- *max_len = len;
-
- switch (fileid_type) {
- case FILEID_INO32_GEN_PARENT:
- spin_lock(&dentry->d_lock);
- fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
- fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
- spin_unlock(&dentry->d_lock);
- /*FALLTHRU*/
- case FILEID_INO32_GEN:
- fid->i32.ino = inode->i_ino;
- fid->i32.gen = inode->i_generation;
- break;
- case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
- spin_lock(&dentry->d_lock);
- fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
- fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
- spin_unlock(&dentry->d_lock);
- /*FALLTHRU*/
- case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
- fid64->ino = inode->i_ino;
- fid64->gen = inode->i_generation;
- break;
- }
-
- return fileid_type;
-}
-
-STATIC struct inode *
-xfs_nfs_get_inode(
- struct super_block *sb,
- u64 ino,
- u32 generation)
- {
- xfs_mount_t *mp = XFS_M(sb);
- xfs_inode_t *ip;
- int error;
-
- /*
- * NFS can sometimes send requests for ino 0. Fail them gracefully.
- */
- if (ino == 0)
- return ERR_PTR(-ESTALE);
-
- /*
- * The XFS_IGET_UNTRUSTED means that an invalid inode number is just
- * fine and not an indication of a corrupted filesystem as clients can
- * send invalid file handles and we have to handle it gracefully.
- */
- error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
- if (error) {
- /*
- * EINVAL means the inode cluster doesn't exist anymore.
- * This implies the filehandle is stale, so we should
- * translate it here.
- * We don't use ESTALE directly down the chain to not
- * confuse applications using bulkstat that expect EINVAL.
- */ - if (error == EINVAL || error == ENOENT) - error = ESTALE; - return ERR_PTR(-error); - } - - if (ip->i_d.di_gen != generation) { - IRELE(ip); - return ERR_PTR(-ESTALE); - } - - return VFS_I(ip); -} - -STATIC struct dentry * -xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid, - int fh_len, int fileid_type) -{ - struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid; - struct inode *inode = NULL; - - if (fh_len < xfs_fileid_length(fileid_type)) - return NULL; - - switch (fileid_type) { - case FILEID_INO32_GEN_PARENT: - case FILEID_INO32_GEN: - inode = xfs_nfs_get_inode(sb, fid->i32.ino, fid->i32.gen); - break; - case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: - case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: - inode = xfs_nfs_get_inode(sb, fid64->ino, fid64->gen); - break; - } - - return d_obtain_alias(inode); -} - -STATIC struct dentry * -xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid, - int fh_len, int fileid_type) -{ - struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid; - struct inode *inode = NULL; - - switch (fileid_type) { - case FILEID_INO32_GEN_PARENT: - inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino, - fid->i32.parent_gen); - break; - case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: - inode = xfs_nfs_get_inode(sb, fid64->parent_ino, - fid64->parent_gen); - break; - } - - return d_obtain_alias(inode); -} - -STATIC struct dentry * -xfs_fs_get_parent( - struct dentry *child) -{ - int error; - struct xfs_inode *cip; - - error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); - if (unlikely(error)) - return ERR_PTR(-error); - - return d_obtain_alias(VFS_I(cip)); -} - -STATIC int -xfs_fs_nfs_commit_metadata( - struct inode *inode) -{ - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - int error = 0; - - xfs_ilock(ip, XFS_ILOCK_SHARED); - if (xfs_ipincount(ip)) { - error = _xfs_log_force_lsn(mp, ip->i_itemp->ili_last_lsn, - XFS_LOG_SYNC, NULL); - } - xfs_iunlock(ip, XFS_ILOCK_SHARED); - - return error; -} - -const struct export_operations xfs_export_operations = { - .encode_fh = xfs_fs_encode_fh, - .fh_to_dentry = xfs_fs_fh_to_dentry, - .fh_to_parent = xfs_fs_fh_to_parent, - .get_parent = xfs_fs_get_parent, - .commit_metadata = xfs_fs_nfs_commit_metadata, -}; diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/linux-2.6/xfs_export.h deleted file mode 100644 index 3272b6a..0000000 --- a/fs/xfs/linux-2.6/xfs_export.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_EXPORT_H__ -#define __XFS_EXPORT_H__ - -/* - * Common defines for code related to exporting XFS filesystems over NFS. - * - * The NFS fileid goes out on the wire as an array of - * 32bit unsigned ints in host order. There are 5 possible - * formats. 
- * - * (1) fileid_type=0x00 - * (no fileid data; handled by the generic code) - * - * (2) fileid_type=0x01 - * inode-num - * generation - * - * (3) fileid_type=0x02 - * inode-num - * generation - * parent-inode-num - * parent-generation - * - * (4) fileid_type=0x81 - * inode-num-lo32 - * inode-num-hi32 - * generation - * - * (5) fileid_type=0x82 - * inode-num-lo32 - * inode-num-hi32 - * generation - * parent-inode-num-lo32 - * parent-inode-num-hi32 - * parent-generation - * - * Note, the NFS filehandle also includes an fsid portion which - * may have an inode number in it. That number is hardcoded to - * 32bits and there is no way for XFS to intercept it. In - * practice this means when exporting an XFS filesystem with 64bit - * inodes you should either export the mountpoint (rather than - * a subdirectory) or use the "fsid" export option. - */ - -struct xfs_fid64 { - u64 ino; - u32 gen; - u64 parent_ino; - u32 parent_gen; -} __attribute__((packed)); - -/* This flag goes on the wire. Don't play with it. */ -#define XFS_FILEID_TYPE_64FLAG 0x80 /* NFS fileid has 64bit inodes */ - -#endif /* __XFS_EXPORT_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c deleted file mode 100644 index 7f7b424..0000000 --- a/fs/xfs/linux-2.6/xfs_file.c +++ /dev/null @@ -1,1096 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_trans.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_alloc.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_inode_item.h" -#include "xfs_bmap.h" -#include "xfs_error.h" -#include "xfs_vnodeops.h" -#include "xfs_da_btree.h" -#include "xfs_ioctl.h" -#include "xfs_trace.h" - -#include -#include - -static const struct vm_operations_struct xfs_file_vm_ops; - -/* - * Locking primitives for read and write IO paths to ensure we consistently use - * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. - */ -static inline void -xfs_rw_ilock( - struct xfs_inode *ip, - int type) -{ - if (type & XFS_IOLOCK_EXCL) - mutex_lock(&VFS_I(ip)->i_mutex); - xfs_ilock(ip, type); -} - -static inline void -xfs_rw_iunlock( - struct xfs_inode *ip, - int type) -{ - xfs_iunlock(ip, type); - if (type & XFS_IOLOCK_EXCL) - mutex_unlock(&VFS_I(ip)->i_mutex); -} - -static inline void -xfs_rw_ilock_demote( - struct xfs_inode *ip, - int type) -{ - xfs_ilock_demote(ip, type); - if (type & XFS_IOLOCK_EXCL) - mutex_unlock(&VFS_I(ip)->i_mutex); -} - -/* - * xfs_iozero - * - * xfs_iozero clears the specified range of buffer supplied, - * and marks all the affected blocks as valid and modified. If - * an affected block is not allocated, it will be allocated. 
If
- * an affected block is not completely overwritten, and is not
- * valid before the operation, it will be read from disk before
- * being partially zeroed.
- */
-STATIC int
-xfs_iozero(
-	struct xfs_inode	*ip,	/* inode			*/
-	loff_t			pos,	/* offset in file		*/
-	size_t			count)	/* size of data to zero		*/
-{
-	struct page		*page;
-	struct address_space	*mapping;
-	int			status;
-
-	mapping = VFS_I(ip)->i_mapping;
-	do {
-		unsigned offset, bytes;
-		void *fsdata;
-
-		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
-		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
-			bytes = count;
-
-		status = pagecache_write_begin(NULL, mapping, pos, bytes,
-					AOP_FLAG_UNINTERRUPTIBLE,
-					&page, &fsdata);
-		if (status)
-			break;
-
-		zero_user(page, offset, bytes);
-
-		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
-					page, fsdata);
-		WARN_ON(status <= 0); /* can't return less than zero! */
-		pos += bytes;
-		count -= bytes;
-		status = 0;
-	} while (count);
-
-	return (-status);
-}
-
-STATIC int
-xfs_file_fsync(
-	struct file		*file,
-	loff_t			start,
-	loff_t			end,
-	int			datasync)
-{
-	struct inode		*inode = file->f_mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_trans	*tp;
-	int			error = 0;
-	int			log_flushed = 0;
-
-	trace_xfs_file_fsync(ip);
-
-	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (error)
-		return error;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -XFS_ERROR(EIO);
-
-	xfs_iflags_clear(ip, XFS_ITRUNCATED);
-
-	xfs_ilock(ip, XFS_IOLOCK_SHARED);
-	xfs_ioend_wait(ip);
-	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
-	if (mp->m_flags & XFS_MOUNT_BARRIER) {
-		/*
-		 * If we have an RT and/or log subvolume we need to make sure
-		 * to flush the write cache of the device used for file data
-		 * first.  This is to ensure newly written file data make
-		 * it to disk before logging the new inode size in case of
-		 * an extending write.
-		 */
-		if (XFS_IS_REALTIME_INODE(ip))
-			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
-		else if (mp->m_logdev_targp != mp->m_ddev_targp)
-			xfs_blkdev_issue_flush(mp->m_ddev_targp);
-	}
-
-	/*
-	 * We always need to make sure that the required inode state is safe on
-	 * disk.  The inode might be clean but we still might need to force the
-	 * log because of committed transactions that haven't hit the disk yet.
-	 * Likewise, there could be unflushed non-transactional changes to the
-	 * inode core that have to go to disk and this requires us to issue
-	 * a synchronous transaction to capture these changes correctly.
-	 *
-	 * This code relies on the assumption that if the i_update_core field
-	 * of the inode is clear and the inode is unpinned then it is clean
-	 * and no action is required.
-	 */
-	xfs_ilock(ip, XFS_ILOCK_SHARED);
-
-	/*
-	 * First check if the VFS inode is marked dirty.  All the dirtying
-	 * of non-transactional updates now goes through mark_inode_dirty*,
-	 * which allows us to distinguish between pure timestamp updates
-	 * and i_size updates which need to be caught for fdatasync.
-	 * After that also check for the dirty state in the XFS inode, which
-	 * might get cleared when the inode gets written out via the AIL
-	 * or xfs_iflush_cluster.
-	 */
-	if (((inode->i_state & I_DIRTY_DATASYNC) ||
-	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
-	    ip->i_update_core) {
-		/*
-		 * Kick off a transaction to log the inode core to get the
-		 * updates.  The sync transaction will also force the log.
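-		 *
-		 * For illustration (a sketch, not from the original source),
-		 * the check above boils down to
-		 *
-		 *	log_inode = ip->i_update_core &&
-		 *		    ((i_state & I_DIRTY_DATASYNC) ||
-		 *		     ((i_state & I_DIRTY_SYNC) && !datasync));
-		 *
-		 * i.e. fdatasync() deliberately ignores pure timestamp
-		 * dirtiness (I_DIRTY_SYNC) and only pays for the transaction
-		 * when size- or data-relevant metadata changed.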
-		 */
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-		error = xfs_trans_reserve(tp, 0,
-				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-		if (error) {
-			xfs_trans_cancel(tp, 0);
-			return -error;
-		}
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
-
-		/*
-		 * Note - it's possible that we might have pushed ourselves out
-		 * of the way during trans_reserve which would flush the inode.
-		 * But there's no guarantee that the inode buffer has actually
-		 * gone out yet (it's delwri).  Plus the buffer could be pinned
-		 * anyway if it's part of an inode in another recent
-		 * transaction.  So we play it safe and fire off the
-		 * transaction anyway.
-		 */
-		xfs_trans_ijoin(tp, ip);
-		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-		xfs_trans_set_sync(tp);
-		error = _xfs_trans_commit(tp, 0, &log_flushed);
-
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-	} else {
-		/*
-		 * Timestamps/size haven't changed since last inode flush or
-		 * inode transaction commit.  That means either nothing got
-		 * written or a transaction committed which caught the updates.
-		 * If the latter happened and the transaction hasn't hit the
-		 * disk yet, the inode will still be pinned.  If it is,
-		 * force the log.
-		 */
-		if (xfs_ipincount(ip)) {
-			error = _xfs_log_force_lsn(mp,
-					ip->i_itemp->ili_last_lsn,
-					XFS_LOG_SYNC, &log_flushed);
-		}
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-	}
-
-	/*
-	 * If we only have a single device, and the log force above was
-	 * a no-op we might have to flush the data device cache here.
-	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
-	 * an already allocated file and thus do not have any metadata to
-	 * commit.
-	 */
-	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
-	    mp->m_logdev_targp == mp->m_ddev_targp &&
-	    !XFS_IS_REALTIME_INODE(ip) &&
-	    !log_flushed)
-		xfs_blkdev_issue_flush(mp->m_ddev_targp);
-
-	return -error;
-}
-
-STATIC ssize_t
-xfs_file_aio_read(
-	struct kiocb		*iocb,
-	const struct iovec	*iovp,
-	unsigned long		nr_segs,
-	loff_t			pos)
-{
-	struct file		*file = iocb->ki_filp;
-	struct inode		*inode = file->f_mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	size_t			size = 0;
-	ssize_t			ret = 0;
-	int			ioflags = 0;
-	xfs_fsize_t		n;
-	unsigned long		seg;
-
-	XFS_STATS_INC(xs_read_calls);
-
-	BUG_ON(iocb->ki_pos != pos);
-
-	if (unlikely(file->f_flags & O_DIRECT))
-		ioflags |= IO_ISDIRECT;
-	if (file->f_mode & FMODE_NOCMTIME)
-		ioflags |= IO_INVIS;
-
-	/* START copy & waste from filemap.c */
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *iv = &iovp[seg];
-
-		/*
-		 * If any segment has a negative length, or the cumulative
-		 * length ever wraps negative then return -EINVAL.
-		 */
-		size += iv->iov_len;
-		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
-			return XFS_ERROR(-EINVAL);
-	}
-	/* END copy & waste from filemap.c */
-
-	if (unlikely(ioflags & IO_ISDIRECT)) {
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-				mp->m_rtdev_targp : mp->m_ddev_targp;
-		if ((iocb->ki_pos & target->bt_smask) ||
-		    (size & target->bt_smask)) {
-			if (iocb->ki_pos == ip->i_size)
-				return 0;
-			return -XFS_ERROR(EINVAL);
-		}
-	}
-
-	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
-	if (n <= 0 || size == 0)
-		return 0;
-
-	if (n < size)
-		size = n;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -EIO;
-
-	if (unlikely(ioflags & IO_ISDIRECT)) {
-		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
-
-		if (inode->i_mapping->nrpages) {
-			ret = -xfs_flushinval_pages(ip,
-					(iocb->ki_pos & PAGE_CACHE_MASK),
-					-1, FI_REMAPF_LOCKED);
-			if (ret) {
-				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
-				return ret;
-			}
-		}
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
-	} else
-		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-
-	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
-
-	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
-	if (ret > 0)
-		XFS_STATS_ADD(xs_read_bytes, ret);
-
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-	return ret;
-}
-
-STATIC ssize_t
-xfs_file_splice_read(
-	struct file		*infilp,
-	loff_t			*ppos,
-	struct pipe_inode_info	*pipe,
-	size_t			count,
-	unsigned int		flags)
-{
-	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
-	int			ioflags = 0;
-	ssize_t			ret;
-
-	XFS_STATS_INC(xs_read_calls);
-
-	if (infilp->f_mode & FMODE_NOCMTIME)
-		ioflags |= IO_INVIS;
-
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		return -EIO;
-
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-
-	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
-
-	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
-	if (ret > 0)
-		XFS_STATS_ADD(xs_read_bytes, ret);
-
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
-	return ret;
-}
-
-STATIC void
-xfs_aio_write_isize_update(
-	struct inode	*inode,
-	loff_t		*ppos,
-	ssize_t		bytes_written)
-{
-	struct xfs_inode	*ip = XFS_I(inode);
-	xfs_fsize_t		isize = i_size_read(inode);
-
-	if (bytes_written > 0)
-		XFS_STATS_ADD(xs_write_bytes, bytes_written);
-
-	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
-					*ppos > isize))
-		*ppos = isize;
-
-	if (*ppos > ip->i_size) {
-		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-		if (*ppos > ip->i_size)
-			ip->i_size = *ppos;
-		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-}
-
-/*
- * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
- * part of the I/O may have been written to disk before the error occurred. In
- * this case the on-disk file size may have been adjusted beyond the in-memory
- * file size and now needs to be truncated back.
- */
-STATIC void
-xfs_aio_write_newsize_update(
-	struct xfs_inode	*ip)
-{
-	if (ip->i_new_size) {
-		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-		ip->i_new_size = 0;
-		if (ip->i_d.di_size > ip->i_size)
-			ip->i_d.di_size = ip->i_size;
-		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
-	}
-}
-
-/*
- * xfs_file_splice_write() does not use xfs_rw_ilock() because
- * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
- * could cause lock inversions between the aio_write path and the splice path
- * if someone is doing concurrent splice(2) based writes and write(2) based
- * writes to the same inode.  The only real way to fix this is to re-implement
- * the generic code here with correct locking orders.
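- *
- * Sketch of the inversion being avoided (an illustration, not from the
- * original source):
- *
- *	write(2):   xfs_rw_ilock()		i_mutex, then XFS iolock
- *	splice(2):  this function, then		XFS iolock, then i_mutex
- *		    generic_file_splice_write()
- *
- * Two tasks taking the same pair of locks in opposite orders can
- * deadlock, which is why only the plain xfs_ilock() is used here.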
- */ -STATIC ssize_t -xfs_file_splice_write( - struct pipe_inode_info *pipe, - struct file *outfilp, - loff_t *ppos, - size_t count, - unsigned int flags) -{ - struct inode *inode = outfilp->f_mapping->host; - struct xfs_inode *ip = XFS_I(inode); - xfs_fsize_t new_size; - int ioflags = 0; - ssize_t ret; - - XFS_STATS_INC(xs_write_calls); - - if (outfilp->f_mode & FMODE_NOCMTIME) - ioflags |= IO_INVIS; - - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return -EIO; - - xfs_ilock(ip, XFS_IOLOCK_EXCL); - - new_size = *ppos + count; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (new_size > ip->i_size) - ip->i_new_size = new_size; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - - trace_xfs_file_splice_write(ip, count, *ppos, ioflags); - - ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); - - xfs_aio_write_isize_update(inode, ppos, ret); - xfs_aio_write_newsize_update(ip); - xfs_iunlock(ip, XFS_IOLOCK_EXCL); - return ret; -} - -/* - * This routine is called to handle zeroing any space in the last - * block of the file that is beyond the EOF. We do this since the - * size is being increased without writing anything to that block - * and we don't want anyone to read the garbage on the disk. - */ -STATIC int /* error (positive) */ -xfs_zero_last_block( - xfs_inode_t *ip, - xfs_fsize_t offset, - xfs_fsize_t isize) -{ - xfs_fileoff_t last_fsb; - xfs_mount_t *mp = ip->i_mount; - int nimaps; - int zero_offset; - int zero_len; - int error = 0; - xfs_bmbt_irec_t imap; - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - - zero_offset = XFS_B_FSB_OFFSET(mp, isize); - if (zero_offset == 0) { - /* - * There are no extra bytes in the last block on disk to - * zero, so return. - */ - return 0; - } - - last_fsb = XFS_B_TO_FSBT(mp, isize); - nimaps = 1; - error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, - &nimaps, NULL); - if (error) { - return error; - } - ASSERT(nimaps > 0); - /* - * If the block underlying isize is just a hole, then there - * is nothing to zero. - */ - if (imap.br_startblock == HOLESTARTBLOCK) { - return 0; - } - /* - * Zero the part of the last block beyond the EOF, and write it - * out sync. We need to drop the ilock while we do this so we - * don't deadlock when the buffer cache calls back to us. - */ - xfs_iunlock(ip, XFS_ILOCK_EXCL); - - zero_len = mp->m_sb.sb_blocksize - zero_offset; - if (isize + zero_len > offset) - zero_len = offset - isize; - error = xfs_iozero(ip, isize, zero_len); - - xfs_ilock(ip, XFS_ILOCK_EXCL); - ASSERT(error >= 0); - return error; -} - -/* - * Zero any on disk space between the current EOF and the new, - * larger EOF. This handles the normal case of zeroing the remainder - * of the last block in the file and the unusual case of zeroing blocks - * out beyond the size of the file. This second case only happens - * with fixed size extents and when the system crashes before the inode - * size was updated but after blocks were allocated. If fill is set, - * then any holes in the range are filled and zeroed. If not, the holes - * are left alone as holes. 
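- *
- * Worked example (an illustration assuming 4k filesystem blocks, not
- * from the original source): with isize = 6000 and a write starting at
- * offset = 20000, xfs_zero_last_block() zeroes bytes 6000..8191 of the
- * block holding isize, and the loop below zeroes bytes 8192..19999,
- * skipping any blocks that are holes or unwritten extents, so no stale
- * disk contents become visible between the old and new EOF.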
- */ - -int /* error (positive) */ -xfs_zero_eof( - xfs_inode_t *ip, - xfs_off_t offset, /* starting I/O offset */ - xfs_fsize_t isize) /* current inode size */ -{ - xfs_mount_t *mp = ip->i_mount; - xfs_fileoff_t start_zero_fsb; - xfs_fileoff_t end_zero_fsb; - xfs_fileoff_t zero_count_fsb; - xfs_fileoff_t last_fsb; - xfs_fileoff_t zero_off; - xfs_fsize_t zero_len; - int nimaps; - int error = 0; - xfs_bmbt_irec_t imap; - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); - ASSERT(offset > isize); - - /* - * First handle zeroing the block on which isize resides. - * We only zero a part of that block so it is handled specially. - */ - error = xfs_zero_last_block(ip, offset, isize); - if (error) { - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); - return error; - } - - /* - * Calculate the range between the new size and the old - * where blocks needing to be zeroed may exist. To get the - * block where the last byte in the file currently resides, - * we need to subtract one from the size and truncate back - * to a block boundary. We subtract 1 in case the size is - * exactly on a block boundary. - */ - last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; - start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); - end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); - ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); - if (last_fsb == end_zero_fsb) { - /* - * The size was only incremented on its last block. - * We took care of that above, so just return. - */ - return 0; - } - - ASSERT(start_zero_fsb <= end_zero_fsb); - while (start_zero_fsb <= end_zero_fsb) { - nimaps = 1; - zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; - error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, - 0, NULL, 0, &imap, &nimaps, NULL); - if (error) { - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); - return error; - } - ASSERT(nimaps > 0); - - if (imap.br_state == XFS_EXT_UNWRITTEN || - imap.br_startblock == HOLESTARTBLOCK) { - /* - * This loop handles initializing pages that were - * partially initialized by the code below this - * loop. It basically zeroes the part of the page - * that sits on a hole and sets the page as P_HOLE - * and calls remapf if it is a mapped file. - */ - start_zero_fsb = imap.br_startoff + imap.br_blockcount; - ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); - continue; - } - - /* - * There are blocks we need to zero. - * Drop the inode lock while we're doing the I/O. - * We'll still have the iolock to protect us. - */ - xfs_iunlock(ip, XFS_ILOCK_EXCL); - - zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); - zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); - - if ((zero_off + zero_len) > offset) - zero_len = offset - zero_off; - - error = xfs_iozero(ip, zero_off, zero_len); - if (error) { - goto out_lock; - } - - start_zero_fsb = imap.br_startoff + imap.br_blockcount; - ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); - - xfs_ilock(ip, XFS_ILOCK_EXCL); - } - - return 0; - -out_lock: - xfs_ilock(ip, XFS_ILOCK_EXCL); - ASSERT(error >= 0); - return error; -} - -/* - * Common pre-write limit and setup checks. - * - * Returns with iolock held according to @iolock. 
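- *
- * Contract sketch (an illustration, not from the original source): if
- * the initial generic write checks fail, the function drops both the
- * ilock and the iolock it was handed and clears *iolock to 0, so the
- * caller's subsequent xfs_rw_iunlock(ip, *iolock) degenerates to a
- * no-op; any later failure returns with the iolock still held for the
- * caller to release as usual.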
- */ -STATIC ssize_t -xfs_file_aio_write_checks( - struct file *file, - loff_t *pos, - size_t *count, - int *iolock) -{ - struct inode *inode = file->f_mapping->host; - struct xfs_inode *ip = XFS_I(inode); - xfs_fsize_t new_size; - int error = 0; - - error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); - if (error) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); - *iolock = 0; - return error; - } - - new_size = *pos + *count; - if (new_size > ip->i_size) - ip->i_new_size = new_size; - - if (likely(!(file->f_mode & FMODE_NOCMTIME))) - file_update_time(file); - - /* - * If the offset is beyond the size of the file, we need to zero any - * blocks that fall between the existing EOF and the start of this - * write. - */ - if (*pos > ip->i_size) - error = -xfs_zero_eof(ip, *pos, ip->i_size); - - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - if (error) - return error; - - /* - * If we're writing the file then make sure to clear the setuid and - * setgid bits if the process is not being run by root. This keeps - * people from modifying setuid and setgid binaries. - */ - return file_remove_suid(file); - -} - -/* - * xfs_file_dio_aio_write - handle direct IO writes - * - * Lock the inode appropriately to prepare for and issue a direct IO write. - * By separating it from the buffered write path we remove all the tricky to - * follow locking changes and looping. - * - * If there are cached pages or we're extending the file, we need IOLOCK_EXCL - * until we're sure the bytes at the new EOF have been zeroed and/or the cached - * pages are flushed out. - * - * In most cases the direct IO writes will be done holding IOLOCK_SHARED - * allowing them to be done in parallel with reads and other direct IO writes. - * However, if the IO is not aligned to filesystem blocks, the direct IO layer - * needs to do sub-block zeroing and that requires serialisation against other - * direct IOs to the same block. In this case we need to serialise the - * submission of the unaligned IOs so that we don't get racing block zeroing in - * the dio layer. To avoid the problem with aio, we also need to wait for - * outstanding IOs to complete so that unwritten extent conversion is completed - * before we try to map the overlapping block. This is currently implemented by - * hitting it with a big hammer (i.e. xfs_ioend_wait()). - * - * Returns with locks held indicated by @iolock and errors indicated by - * negative return values. - */ -STATIC ssize_t -xfs_file_dio_aio_write( - struct kiocb *iocb, - const struct iovec *iovp, - unsigned long nr_segs, - loff_t pos, - size_t ocount, - int *iolock) -{ - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - ssize_t ret = 0; - size_t count = ocount; - int unaligned_io = 0; - struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? 
- mp->m_rtdev_targp : mp->m_ddev_targp; - - *iolock = 0; - if ((pos & target->bt_smask) || (count & target->bt_smask)) - return -XFS_ERROR(EINVAL); - - if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) - unaligned_io = 1; - - if (unaligned_io || mapping->nrpages || pos > ip->i_size) - *iolock = XFS_IOLOCK_EXCL; - else - *iolock = XFS_IOLOCK_SHARED; - xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); - - ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); - if (ret) - return ret; - - if (mapping->nrpages) { - WARN_ON(*iolock != XFS_IOLOCK_EXCL); - ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, - FI_REMAPF_LOCKED); - if (ret) - return ret; - } - - /* - * If we are doing unaligned IO, wait for all other IO to drain, - * otherwise demote the lock if we had to flush cached pages - */ - if (unaligned_io) - xfs_ioend_wait(ip); - else if (*iolock == XFS_IOLOCK_EXCL) { - xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); - *iolock = XFS_IOLOCK_SHARED; - } - - trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); - ret = generic_file_direct_write(iocb, iovp, - &nr_segs, pos, &iocb->ki_pos, count, ocount); - - /* No fallback to buffered IO on errors for XFS. */ - ASSERT(ret < 0 || ret == count); - return ret; -} - -STATIC ssize_t -xfs_file_buffered_aio_write( - struct kiocb *iocb, - const struct iovec *iovp, - unsigned long nr_segs, - loff_t pos, - size_t ocount, - int *iolock) -{ - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - struct xfs_inode *ip = XFS_I(inode); - ssize_t ret; - int enospc = 0; - size_t count = ocount; - - *iolock = XFS_IOLOCK_EXCL; - xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); - - ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); - if (ret) - return ret; - - /* We can write back this queue in page reclaim */ - current->backing_dev_info = mapping->backing_dev_info; - -write_retry: - trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); - ret = generic_file_buffered_write(iocb, iovp, nr_segs, - pos, &iocb->ki_pos, count, ret); - /* - * if we just got an ENOSPC, flush the inode now we aren't holding any - * page locks and retry *once* - */ - if (ret == -ENOSPC && !enospc) { - ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); - if (ret) - return ret; - enospc = 1; - goto write_retry; - } - current->backing_dev_info = NULL; - return ret; -} - -STATIC ssize_t -xfs_file_aio_write( - struct kiocb *iocb, - const struct iovec *iovp, - unsigned long nr_segs, - loff_t pos) -{ - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - struct xfs_inode *ip = XFS_I(inode); - ssize_t ret; - int iolock; - size_t ocount = 0; - - XFS_STATS_INC(xs_write_calls); - - BUG_ON(iocb->ki_pos != pos); - - ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); - if (ret) - return ret; - - if (ocount == 0) - return 0; - - xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); - - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return -EIO; - - if (unlikely(file->f_flags & O_DIRECT)) - ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, - ocount, &iolock); - else - ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, - ocount, &iolock); - - xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); - - if (ret <= 0) - goto out_unlock; - - /* Handle various SYNC-type writes */ - if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { - loff_t end = pos + ret - 1; - int error; - - xfs_rw_iunlock(ip, iolock); - error = 
xfs_file_fsync(file, pos, end,
-					 (file->f_flags & __O_SYNC) ? 0 : 1);
-		xfs_rw_ilock(ip, iolock);
-		if (error)
-			ret = error;
-	}
-
-out_unlock:
-	xfs_aio_write_newsize_update(ip);
-	xfs_rw_iunlock(ip, iolock);
-	return ret;
-}
-
-STATIC long
-xfs_file_fallocate(
-	struct file	*file,
-	int		mode,
-	loff_t		offset,
-	loff_t		len)
-{
-	struct inode	*inode = file->f_path.dentry->d_inode;
-	long		error;
-	loff_t		new_size = 0;
-	xfs_flock64_t	bf;
-	xfs_inode_t	*ip = XFS_I(inode);
-	int		cmd = XFS_IOC_RESVSP;
-	int		attr_flags = XFS_ATTR_NOLOCK;
-
-	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
-		return -EOPNOTSUPP;
-
-	bf.l_whence = 0;
-	bf.l_start = offset;
-	bf.l_len = len;
-
-	xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
-	if (mode & FALLOC_FL_PUNCH_HOLE)
-		cmd = XFS_IOC_UNRESVSP;
-
-	/* check the new inode size is valid before allocating */
-	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-	    offset + len > i_size_read(inode)) {
-		new_size = offset + len;
-		error = inode_newsize_ok(inode, new_size);
-		if (error)
-			goto out_unlock;
-	}
-
-	if (file->f_flags & O_DSYNC)
-		attr_flags |= XFS_ATTR_SYNC;
-
-	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
-	if (error)
-		goto out_unlock;
-
-	/* Change file size if needed */
-	if (new_size) {
-		struct iattr iattr;
-
-		iattr.ia_valid = ATTR_SIZE;
-		iattr.ia_size = new_size;
-		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
-	}
-
-out_unlock:
-	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-	return error;
-}
-
-
-STATIC int
-xfs_file_open(
-	struct inode	*inode,
-	struct file	*file)
-{
-	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
-		return -EFBIG;
-	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
-		return -EIO;
-	return 0;
-}
-
-STATIC int
-xfs_dir_open(
-	struct inode	*inode,
-	struct file	*file)
-{
-	struct xfs_inode *ip = XFS_I(inode);
-	int		mode;
-	int		error;
-
-	error = xfs_file_open(inode, file);
-	if (error)
-		return error;
-
-	/*
-	 * If there are any blocks, read-ahead block 0 as we're almost
-	 * certain to have the next operation be a read there.
-	 */
-	mode = xfs_ilock_map_shared(ip);
-	if (ip->i_d.di_nextents > 0)
-		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
-	xfs_iunlock(ip, mode);
-	return 0;
-}
-
-STATIC int
-xfs_file_release(
-	struct inode	*inode,
-	struct file	*filp)
-{
-	return -xfs_release(XFS_I(inode));
-}
-
-STATIC int
-xfs_file_readdir(
-	struct file	*filp,
-	void		*dirent,
-	filldir_t	filldir)
-{
-	struct inode	*inode = filp->f_path.dentry->d_inode;
-	xfs_inode_t	*ip = XFS_I(inode);
-	int		error;
-	size_t		bufsize;
-
-	/*
-	 * The Linux API doesn't pass the total size of the buffer we read
-	 * into down to the filesystem.  With the filldir concept it's not
-	 * needed for correct information, but the XFS dir2 leaf code wants
-	 * an estimate of the buffer size to calculate its readahead window
-	 * and size the buffers used for mapping to physical blocks.
-	 *
-	 * Try to give it an estimate that's good enough, maybe at some
-	 * point we can change the ->readdir prototype to include the
-	 * buffer size.  For now we use the current glibc buffer size.
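-	 *
-	 * For illustration (not from the original source): glibc currently
-	 * reads directories in 32k chunks, so the clamp below works out as
-	 *
-	 *	bufsize = min(32768, di_size);
-	 *
-	 * i.e. 32k for large directories and the directory's own size for
-	 * small ones.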
- */ - bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); - - error = xfs_readdir(ip, dirent, bufsize, - (xfs_off_t *)&filp->f_pos, filldir); - if (error) - return -error; - return 0; -} - -STATIC int -xfs_file_mmap( - struct file *filp, - struct vm_area_struct *vma) -{ - vma->vm_ops = &xfs_file_vm_ops; - vma->vm_flags |= VM_CAN_NONLINEAR; - - file_accessed(filp); - return 0; -} - -/* - * mmap()d file has taken write protection fault and is being made - * writable. We can set the page state up correctly for a writable - * page, which means we can do correct delalloc accounting (ENOSPC - * checking!) and unwritten extent mapping. - */ -STATIC int -xfs_vm_page_mkwrite( - struct vm_area_struct *vma, - struct vm_fault *vmf) -{ - return block_page_mkwrite(vma, vmf, xfs_get_blocks); -} - -const struct file_operations xfs_file_operations = { - .llseek = generic_file_llseek, - .read = do_sync_read, - .write = do_sync_write, - .aio_read = xfs_file_aio_read, - .aio_write = xfs_file_aio_write, - .splice_read = xfs_file_splice_read, - .splice_write = xfs_file_splice_write, - .unlocked_ioctl = xfs_file_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = xfs_file_compat_ioctl, -#endif - .mmap = xfs_file_mmap, - .open = xfs_file_open, - .release = xfs_file_release, - .fsync = xfs_file_fsync, - .fallocate = xfs_file_fallocate, -}; - -const struct file_operations xfs_dir_file_operations = { - .open = xfs_dir_open, - .read = generic_read_dir, - .readdir = xfs_file_readdir, - .llseek = generic_file_llseek, - .unlocked_ioctl = xfs_file_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = xfs_file_compat_ioctl, -#endif - .fsync = xfs_file_fsync, -}; - -static const struct vm_operations_struct xfs_file_vm_ops = { - .fault = filemap_fault, - .page_mkwrite = xfs_vm_page_mkwrite, -}; diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c deleted file mode 100644 index ed88ed1..0000000 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_vnodeops.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_trace.h" - -/* - * note: all filemap functions return negative error codes. These - * need to be inverted before returning to the xfs core functions. 
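- *
- * An illustration of the convention (not from the original source): a
- * call such as
- *
- *	ret = -filemap_fdatawrite_range(mapping, first, last);
- *
- * turns the Linux-style -EIO into the positive EIO that the XFS core
- * expects, and the helpers below follow this pattern throughout.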
- */ -void -xfs_tosspages( - xfs_inode_t *ip, - xfs_off_t first, - xfs_off_t last, - int fiopt) -{ - /* can't toss partial tail pages, so mask them out */ - last &= ~(PAGE_SIZE - 1); - truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1); -} - -int -xfs_flushinval_pages( - xfs_inode_t *ip, - xfs_off_t first, - xfs_off_t last, - int fiopt) -{ - struct address_space *mapping = VFS_I(ip)->i_mapping; - int ret = 0; - - trace_xfs_pagecache_inval(ip, first, last); - - xfs_iflags_clear(ip, XFS_ITRUNCATED); - ret = filemap_write_and_wait_range(mapping, first, - last == -1 ? LLONG_MAX : last); - if (!ret) - truncate_inode_pages_range(mapping, first, last); - return -ret; -} - -int -xfs_flush_pages( - xfs_inode_t *ip, - xfs_off_t first, - xfs_off_t last, - uint64_t flags, - int fiopt) -{ - struct address_space *mapping = VFS_I(ip)->i_mapping; - int ret = 0; - int ret2; - - xfs_iflags_clear(ip, XFS_ITRUNCATED); - ret = -filemap_fdatawrite_range(mapping, first, - last == -1 ? LLONG_MAX : last); - if (flags & XBF_ASYNC) - return ret; - ret2 = xfs_wait_on_pages(ip, first, last); - if (!ret) - ret = ret2; - return ret; -} - -int -xfs_wait_on_pages( - xfs_inode_t *ip, - xfs_off_t first, - xfs_off_t last) -{ - struct address_space *mapping = VFS_I(ip)->i_mapping; - - if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) { - return -filemap_fdatawait_range(mapping, first, - last == -1 ? ip->i_size - 1 : last); - } - return 0; -} diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c deleted file mode 100644 index 76e81cf..0000000 --- a/fs/xfs/linux-2.6/xfs_globals.c +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_sysctl.h" - -/* - * Tunable XFS parameters. xfs_params is required even when CONFIG_SYSCTL=n, - * other XFS code uses these values. Times are measured in centisecs (i.e. - * 100ths of a second). - */ -xfs_param_t xfs_params = { - /* MIN DFLT MAX */ - .sgid_inherit = { 0, 0, 1 }, - .symlink_mode = { 0, 0, 1 }, - .panic_mask = { 0, 0, 255 }, - .error_level = { 0, 3, 11 }, - .syncd_timer = { 1*100, 30*100, 7200*100}, - .stats_clear = { 0, 0, 1 }, - .inherit_sync = { 0, 1, 1 }, - .inherit_nodump = { 0, 1, 1 }, - .inherit_noatim = { 0, 1, 1 }, - .xfs_buf_timer = { 100/2, 1*100, 30*100 }, - .xfs_buf_age = { 1*100, 15*100, 7200*100}, - .inherit_nosym = { 0, 0, 1 }, - .rotorstep = { 1, 1, 255 }, - .inherit_nodfrg = { 0, 1, 1 }, - .fstrm_timer = { 1, 30*100, 3600*100}, -}; diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c deleted file mode 100644 index f7ce7de..0000000 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ /dev/null @@ -1,1556 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_ioctl.h" -#include "xfs_rtalloc.h" -#include "xfs_itable.h" -#include "xfs_error.h" -#include "xfs_attr.h" -#include "xfs_bmap.h" -#include "xfs_buf_item.h" -#include "xfs_utils.h" -#include "xfs_dfrag.h" -#include "xfs_fsops.h" -#include "xfs_vnodeops.h" -#include "xfs_discard.h" -#include "xfs_quota.h" -#include "xfs_inode_item.h" -#include "xfs_export.h" -#include "xfs_trace.h" - -#include -#include -#include -#include -#include -#include -#include - -/* - * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to - * a file or fs handle. - * - * XFS_IOC_PATH_TO_FSHANDLE - * returns fs handle for a mount point or path within that mount point - * XFS_IOC_FD_TO_HANDLE - * returns full handle for a FD opened in user space - * XFS_IOC_PATH_TO_HANDLE - * returns full handle for a path - */ -int -xfs_find_handle( - unsigned int cmd, - xfs_fsop_handlereq_t *hreq) -{ - int hsize; - xfs_handle_t handle; - struct inode *inode; - struct file *file = NULL; - struct path path; - int error; - struct xfs_inode *ip; - - if (cmd == XFS_IOC_FD_TO_HANDLE) { - file = fget(hreq->fd); - if (!file) - return -EBADF; - inode = file->f_path.dentry->d_inode; - } else { - error = user_lpath((const char __user *)hreq->path, &path); - if (error) - return error; - inode = path.dentry->d_inode; - } - ip = XFS_I(inode); - - /* - * We can only generate handles for inodes residing on a XFS filesystem, - * and only for regular files, directories or symbolic links. - */ - error = -EINVAL; - if (inode->i_sb->s_magic != XFS_SB_MAGIC) - goto out_put; - - error = -EBADF; - if (!S_ISREG(inode->i_mode) && - !S_ISDIR(inode->i_mode) && - !S_ISLNK(inode->i_mode)) - goto out_put; - - - memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t)); - - if (cmd == XFS_IOC_PATH_TO_FSHANDLE) { - /* - * This handle only contains an fsid, zero the rest. 
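-		 *
-		 * Layout sketch (an illustration, not from the original
-		 * source): an xfs_handle_t is the fixed ha_fsid followed
-		 * by the variable-length ha_fid, so a filesystem handle
-		 * is just the first sizeof(xfs_fsid_t) bytes, while the
-		 * full file-handle case below copies the larger
-		 * XFS_HSIZE(handle).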
- */ - memset(&handle.ha_fid, 0, sizeof(handle.ha_fid)); - hsize = sizeof(xfs_fsid_t); - } else { - int lock_mode; - - lock_mode = xfs_ilock_map_shared(ip); - handle.ha_fid.fid_len = sizeof(xfs_fid_t) - - sizeof(handle.ha_fid.fid_len); - handle.ha_fid.fid_pad = 0; - handle.ha_fid.fid_gen = ip->i_d.di_gen; - handle.ha_fid.fid_ino = ip->i_ino; - xfs_iunlock_map_shared(ip, lock_mode); - - hsize = XFS_HSIZE(handle); - } - - error = -EFAULT; - if (copy_to_user(hreq->ohandle, &handle, hsize) || - copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) - goto out_put; - - error = 0; - - out_put: - if (cmd == XFS_IOC_FD_TO_HANDLE) - fput(file); - else - path_put(&path); - return error; -} - -/* - * No need to do permission checks on the various pathname components - * as the handle operations are privileged. - */ -STATIC int -xfs_handle_acceptable( - void *context, - struct dentry *dentry) -{ - return 1; -} - -/* - * Convert userspace handle data into a dentry. - */ -struct dentry * -xfs_handle_to_dentry( - struct file *parfilp, - void __user *uhandle, - u32 hlen) -{ - xfs_handle_t handle; - struct xfs_fid64 fid; - - /* - * Only allow handle opens under a directory. - */ - if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode)) - return ERR_PTR(-ENOTDIR); - - if (hlen != sizeof(xfs_handle_t)) - return ERR_PTR(-EINVAL); - if (copy_from_user(&handle, uhandle, hlen)) - return ERR_PTR(-EFAULT); - if (handle.ha_fid.fid_len != - sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len)) - return ERR_PTR(-EINVAL); - - memset(&fid, 0, sizeof(struct fid)); - fid.ino = handle.ha_fid.fid_ino; - fid.gen = handle.ha_fid.fid_gen; - - return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3, - FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG, - xfs_handle_acceptable, NULL); -} - -STATIC struct dentry * -xfs_handlereq_to_dentry( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq) -{ - return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen); -} - -int -xfs_open_by_handle( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq) -{ - const struct cred *cred = current_cred(); - int error; - int fd; - int permflag; - struct file *filp; - struct inode *inode; - struct dentry *dentry; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - - dentry = xfs_handlereq_to_dentry(parfilp, hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - inode = dentry->d_inode; - - /* Restrict xfs_open_by_handle to directories & regular files. */ - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { - error = -XFS_ERROR(EPERM); - goto out_dput; - } - -#if BITS_PER_LONG != 32 - hreq->oflags |= O_LARGEFILE; -#endif - - /* Put open permission in namei format. */ - permflag = hreq->oflags; - if ((permflag+1) & O_ACCMODE) - permflag++; - if (permflag & O_TRUNC) - permflag |= 2; - - if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && - (permflag & FMODE_WRITE) && IS_APPEND(inode)) { - error = -XFS_ERROR(EPERM); - goto out_dput; - } - - if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) { - error = -XFS_ERROR(EACCES); - goto out_dput; - } - - /* Can't write directories. 
*/ - if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { - error = -XFS_ERROR(EISDIR); - goto out_dput; - } - - fd = get_unused_fd(); - if (fd < 0) { - error = fd; - goto out_dput; - } - - filp = dentry_open(dentry, mntget(parfilp->f_path.mnt), - hreq->oflags, cred); - if (IS_ERR(filp)) { - put_unused_fd(fd); - return PTR_ERR(filp); - } - - if (S_ISREG(inode->i_mode)) { - filp->f_flags |= O_NOATIME; - filp->f_mode |= FMODE_NOCMTIME; - } - - fd_install(fd, filp); - return fd; - - out_dput: - dput(dentry); - return error; -} - -/* - * This is a copy from fs/namei.c:vfs_readlink(), except for removing it's - * unused first argument. - */ -STATIC int -do_readlink( - char __user *buffer, - int buflen, - const char *link) -{ - int len; - - len = PTR_ERR(link); - if (IS_ERR(link)) - goto out; - - len = strlen(link); - if (len > (unsigned) buflen) - len = buflen; - if (copy_to_user(buffer, link, len)) - len = -EFAULT; - out: - return len; -} - - -int -xfs_readlink_by_handle( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq) -{ - struct dentry *dentry; - __u32 olen; - void *link; - int error; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - - dentry = xfs_handlereq_to_dentry(parfilp, hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - /* Restrict this handle operation to symlinks only. */ - if (!S_ISLNK(dentry->d_inode->i_mode)) { - error = -XFS_ERROR(EINVAL); - goto out_dput; - } - - if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { - error = -XFS_ERROR(EFAULT); - goto out_dput; - } - - link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); - if (!link) { - error = -XFS_ERROR(ENOMEM); - goto out_dput; - } - - error = -xfs_readlink(XFS_I(dentry->d_inode), link); - if (error) - goto out_kfree; - error = do_readlink(hreq->ohandle, olen, link); - if (error) - goto out_kfree; - - out_kfree: - kfree(link); - out_dput: - dput(dentry); - return error; -} - -STATIC int -xfs_fssetdm_by_handle( - struct file *parfilp, - void __user *arg) -{ - int error; - struct fsdmidata fsd; - xfs_fsop_setdm_handlereq_t dmhreq; - struct dentry *dentry; - - if (!capable(CAP_MKNOD)) - return -XFS_ERROR(EPERM); - if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) - return -XFS_ERROR(EFAULT); - - dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { - error = -XFS_ERROR(EPERM); - goto out; - } - - if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { - error = -XFS_ERROR(EFAULT); - goto out; - } - - error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, - fsd.fsd_dmstate); - - out: - dput(dentry); - return error; -} - -STATIC int -xfs_attrlist_by_handle( - struct file *parfilp, - void __user *arg) -{ - int error = -ENOMEM; - attrlist_cursor_kern_t *cursor; - xfs_fsop_attrlist_handlereq_t al_hreq; - struct dentry *dentry; - char *kbuf; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) - return -XFS_ERROR(EFAULT); - if (al_hreq.buflen > XATTR_LIST_MAX) - return -XFS_ERROR(EINVAL); - - /* - * Reject flags, only allow namespaces. 
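-	 *
-	 * For illustration (not from the original source): ATTR_ROOT and
-	 * ATTR_SECURE merely select an attribute namespace, so
-	 *
-	 *	al_hreq.flags = ATTR_ROOT;			accepted
-	 *	al_hreq.flags = ATTR_ROOT | ATTR_CREATE;	EINVAL
-	 *
-	 * since ATTR_CREATE is a behavioural flag, not a namespace.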
- */ - if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) - return -XFS_ERROR(EINVAL); - - dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL); - if (!kbuf) - goto out_dput; - - cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; - error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, - al_hreq.flags, cursor); - if (error) - goto out_kfree; - - if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen)) - error = -EFAULT; - - out_kfree: - kfree(kbuf); - out_dput: - dput(dentry); - return error; -} - -int -xfs_attrmulti_attr_get( - struct inode *inode, - unsigned char *name, - unsigned char __user *ubuf, - __uint32_t *len, - __uint32_t flags) -{ - unsigned char *kbuf; - int error = EFAULT; - - if (*len > XATTR_SIZE_MAX) - return EINVAL; - kbuf = kmalloc(*len, GFP_KERNEL); - if (!kbuf) - return ENOMEM; - - error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); - if (error) - goto out_kfree; - - if (copy_to_user(ubuf, kbuf, *len)) - error = EFAULT; - - out_kfree: - kfree(kbuf); - return error; -} - -int -xfs_attrmulti_attr_set( - struct inode *inode, - unsigned char *name, - const unsigned char __user *ubuf, - __uint32_t len, - __uint32_t flags) -{ - unsigned char *kbuf; - int error = EFAULT; - - if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) - return EPERM; - if (len > XATTR_SIZE_MAX) - return EINVAL; - - kbuf = memdup_user(ubuf, len); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); - - return error; -} - -int -xfs_attrmulti_attr_remove( - struct inode *inode, - unsigned char *name, - __uint32_t flags) -{ - if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) - return EPERM; - return xfs_attr_remove(XFS_I(inode), name, flags); -} - -STATIC int -xfs_attrmulti_by_handle( - struct file *parfilp, - void __user *arg) -{ - int error; - xfs_attr_multiop_t *ops; - xfs_fsop_attrmulti_handlereq_t am_hreq; - struct dentry *dentry; - unsigned int i, size; - unsigned char *attr_name; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) - return -XFS_ERROR(EFAULT); - - /* overflow check */ - if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t)) - return -E2BIG; - - dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - error = E2BIG; - size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); - if (!size || size > 16 * PAGE_SIZE) - goto out_dput; - - ops = memdup_user(am_hreq.ops, size); - if (IS_ERR(ops)) { - error = PTR_ERR(ops); - goto out_dput; - } - - attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); - if (!attr_name) - goto out_kfree_ops; - - error = 0; - for (i = 0; i < am_hreq.opcount; i++) { - ops[i].am_error = strncpy_from_user((char *)attr_name, - ops[i].am_attrname, MAXNAMELEN); - if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) - error = -ERANGE; - if (ops[i].am_error < 0) - break; - - switch (ops[i].am_opcode) { - case ATTR_OP_GET: - ops[i].am_error = xfs_attrmulti_attr_get( - dentry->d_inode, attr_name, - ops[i].am_attrvalue, &ops[i].am_length, - ops[i].am_flags); - break; - case ATTR_OP_SET: - ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); - if (ops[i].am_error) - break; - ops[i].am_error = xfs_attrmulti_attr_set( - dentry->d_inode, attr_name, - ops[i].am_attrvalue, ops[i].am_length, - ops[i].am_flags); - mnt_drop_write(parfilp->f_path.mnt); - break; - case ATTR_OP_REMOVE: 
- ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); - if (ops[i].am_error) - break; - ops[i].am_error = xfs_attrmulti_attr_remove( - dentry->d_inode, attr_name, - ops[i].am_flags); - mnt_drop_write(parfilp->f_path.mnt); - break; - default: - ops[i].am_error = EINVAL; - } - } - - if (copy_to_user(am_hreq.ops, ops, size)) - error = XFS_ERROR(EFAULT); - - kfree(attr_name); - out_kfree_ops: - kfree(ops); - out_dput: - dput(dentry); - return -error; -} - -int -xfs_ioc_space( - struct xfs_inode *ip, - struct inode *inode, - struct file *filp, - int ioflags, - unsigned int cmd, - xfs_flock64_t *bf) -{ - int attr_flags = 0; - int error; - - /* - * Only allow the sys admin to reserve space unless - * unwritten extents are enabled. - */ - if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && - !capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - - if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) - return -XFS_ERROR(EPERM); - - if (!(filp->f_mode & FMODE_WRITE)) - return -XFS_ERROR(EBADF); - - if (!S_ISREG(inode->i_mode)) - return -XFS_ERROR(EINVAL); - - if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) - attr_flags |= XFS_ATTR_NONBLOCK; - - if (filp->f_flags & O_DSYNC) - attr_flags |= XFS_ATTR_SYNC; - - if (ioflags & IO_INVIS) - attr_flags |= XFS_ATTR_DMI; - - error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags); - return -error; -} - -STATIC int -xfs_ioc_bulkstat( - xfs_mount_t *mp, - unsigned int cmd, - void __user *arg) -{ - xfs_fsop_bulkreq_t bulkreq; - int count; /* # of records returned */ - xfs_ino_t inlast; /* last inode number */ - int done; - int error; - - /* done = 1 if there are more stats to get and if bulkstat */ - /* should be called again (unused here, but used in dmapi) */ - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (XFS_FORCED_SHUTDOWN(mp)) - return -XFS_ERROR(EIO); - - if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t))) - return -XFS_ERROR(EFAULT); - - if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) - return -XFS_ERROR(EFAULT); - - if ((count = bulkreq.icount) <= 0) - return -XFS_ERROR(EINVAL); - - if (bulkreq.ubuffer == NULL) - return -XFS_ERROR(EINVAL); - - if (cmd == XFS_IOC_FSINUMBERS) - error = xfs_inumbers(mp, &inlast, &count, - bulkreq.ubuffer, xfs_inumbers_fmt); - else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) - error = xfs_bulkstat_single(mp, &inlast, - bulkreq.ubuffer, &done); - else /* XFS_IOC_FSBULKSTAT */ - error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, - sizeof(xfs_bstat_t), bulkreq.ubuffer, - &done); - - if (error) - return -error; - - if (bulkreq.ocount != NULL) { - if (copy_to_user(bulkreq.lastip, &inlast, - sizeof(xfs_ino_t))) - return -XFS_ERROR(EFAULT); - - if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) - return -XFS_ERROR(EFAULT); - } - - return 0; -} - -STATIC int -xfs_ioc_fsgeometry_v1( - xfs_mount_t *mp, - void __user *arg) -{ - xfs_fsop_geom_t fsgeo; - int error; - - error = xfs_fs_geometry(mp, &fsgeo, 3); - if (error) - return -error; - - /* - * Caller should have passed an argument of type - * xfs_fsop_geom_v1_t. This is a proper subset of the - * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 
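-	 *
-	 * ABI sketch (an illustration, not from the original source): the
-	 * v1 structure is a prefix of the full geometry structure, with
-	 * the fields added by later versions at the end, so truncating
-	 * the copy_to_user() below to sizeof(xfs_fsop_geom_v1_t)
-	 * reproduces the old layout exactly without a separate fill
-	 * routine.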
- */ - if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) - return -XFS_ERROR(EFAULT); - return 0; -} - -STATIC int -xfs_ioc_fsgeometry( - xfs_mount_t *mp, - void __user *arg) -{ - xfs_fsop_geom_t fsgeo; - int error; - - error = xfs_fs_geometry(mp, &fsgeo, 4); - if (error) - return -error; - - if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) - return -XFS_ERROR(EFAULT); - return 0; -} - -/* - * Linux extended inode flags interface. - */ - -STATIC unsigned int -xfs_merge_ioc_xflags( - unsigned int flags, - unsigned int start) -{ - unsigned int xflags = start; - - if (flags & FS_IMMUTABLE_FL) - xflags |= XFS_XFLAG_IMMUTABLE; - else - xflags &= ~XFS_XFLAG_IMMUTABLE; - if (flags & FS_APPEND_FL) - xflags |= XFS_XFLAG_APPEND; - else - xflags &= ~XFS_XFLAG_APPEND; - if (flags & FS_SYNC_FL) - xflags |= XFS_XFLAG_SYNC; - else - xflags &= ~XFS_XFLAG_SYNC; - if (flags & FS_NOATIME_FL) - xflags |= XFS_XFLAG_NOATIME; - else - xflags &= ~XFS_XFLAG_NOATIME; - if (flags & FS_NODUMP_FL) - xflags |= XFS_XFLAG_NODUMP; - else - xflags &= ~XFS_XFLAG_NODUMP; - - return xflags; -} - -STATIC unsigned int -xfs_di2lxflags( - __uint16_t di_flags) -{ - unsigned int flags = 0; - - if (di_flags & XFS_DIFLAG_IMMUTABLE) - flags |= FS_IMMUTABLE_FL; - if (di_flags & XFS_DIFLAG_APPEND) - flags |= FS_APPEND_FL; - if (di_flags & XFS_DIFLAG_SYNC) - flags |= FS_SYNC_FL; - if (di_flags & XFS_DIFLAG_NOATIME) - flags |= FS_NOATIME_FL; - if (di_flags & XFS_DIFLAG_NODUMP) - flags |= FS_NODUMP_FL; - return flags; -} - -STATIC int -xfs_ioc_fsgetxattr( - xfs_inode_t *ip, - int attr, - void __user *arg) -{ - struct fsxattr fa; - - memset(&fa, 0, sizeof(struct fsxattr)); - - xfs_ilock(ip, XFS_ILOCK_SHARED); - fa.fsx_xflags = xfs_ip2xflags(ip); - fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; - fa.fsx_projid = xfs_get_projid(ip); - - if (attr) { - if (ip->i_afp) { - if (ip->i_afp->if_flags & XFS_IFEXTENTS) - fa.fsx_nextents = ip->i_afp->if_bytes / - sizeof(xfs_bmbt_rec_t); - else - fa.fsx_nextents = ip->i_d.di_anextents; - } else - fa.fsx_nextents = 0; - } else { - if (ip->i_df.if_flags & XFS_IFEXTENTS) - fa.fsx_nextents = ip->i_df.if_bytes / - sizeof(xfs_bmbt_rec_t); - else - fa.fsx_nextents = ip->i_d.di_nextents; - } - xfs_iunlock(ip, XFS_ILOCK_SHARED); - - if (copy_to_user(arg, &fa, sizeof(fa))) - return -EFAULT; - return 0; -} - -STATIC void -xfs_set_diflags( - struct xfs_inode *ip, - unsigned int xflags) -{ - unsigned int di_flags; - - /* can't set PREALLOC this way, just preserve it */ - di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); - if (xflags & XFS_XFLAG_IMMUTABLE) - di_flags |= XFS_DIFLAG_IMMUTABLE; - if (xflags & XFS_XFLAG_APPEND) - di_flags |= XFS_DIFLAG_APPEND; - if (xflags & XFS_XFLAG_SYNC) - di_flags |= XFS_DIFLAG_SYNC; - if (xflags & XFS_XFLAG_NOATIME) - di_flags |= XFS_DIFLAG_NOATIME; - if (xflags & XFS_XFLAG_NODUMP) - di_flags |= XFS_DIFLAG_NODUMP; - if (xflags & XFS_XFLAG_PROJINHERIT) - di_flags |= XFS_DIFLAG_PROJINHERIT; - if (xflags & XFS_XFLAG_NODEFRAG) - di_flags |= XFS_DIFLAG_NODEFRAG; - if (xflags & XFS_XFLAG_FILESTREAM) - di_flags |= XFS_DIFLAG_FILESTREAM; - if (S_ISDIR(ip->i_d.di_mode)) { - if (xflags & XFS_XFLAG_RTINHERIT) - di_flags |= XFS_DIFLAG_RTINHERIT; - if (xflags & XFS_XFLAG_NOSYMLINKS) - di_flags |= XFS_DIFLAG_NOSYMLINKS; - if (xflags & XFS_XFLAG_EXTSZINHERIT) - di_flags |= XFS_DIFLAG_EXTSZINHERIT; - } else if (S_ISREG(ip->i_d.di_mode)) { - if (xflags & XFS_XFLAG_REALTIME) - di_flags |= XFS_DIFLAG_REALTIME; - if (xflags & XFS_XFLAG_EXTSIZE) - di_flags |= 
XFS_DIFLAG_EXTSIZE; - } - - ip->i_d.di_flags = di_flags; -} - -STATIC void -xfs_diflags_to_linux( - struct xfs_inode *ip) -{ - struct inode *inode = VFS_I(ip); - unsigned int xflags = xfs_ip2xflags(ip); - - if (xflags & XFS_XFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; - if (xflags & XFS_XFLAG_APPEND) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; - if (xflags & XFS_XFLAG_SYNC) - inode->i_flags |= S_SYNC; - else - inode->i_flags &= ~S_SYNC; - if (xflags & XFS_XFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - else - inode->i_flags &= ~S_NOATIME; -} - -#define FSX_PROJID 1 -#define FSX_EXTSIZE 2 -#define FSX_XFLAGS 4 -#define FSX_NONBLOCK 8 - -STATIC int -xfs_ioctl_setattr( - xfs_inode_t *ip, - struct fsxattr *fa, - int mask) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - unsigned int lock_flags = 0; - struct xfs_dquot *udqp = NULL; - struct xfs_dquot *gdqp = NULL; - struct xfs_dquot *olddquot = NULL; - int code; - - trace_xfs_ioctl_setattr(ip); - - if (mp->m_flags & XFS_MOUNT_RDONLY) - return XFS_ERROR(EROFS); - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - /* - * Disallow 32bit project ids when projid32bit feature is not enabled. - */ - if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) && - !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) - return XFS_ERROR(EINVAL); - - /* - * If disk quotas is on, we make sure that the dquots do exist on disk, - * before we start any other transactions. Trying to do this later - * is messy. We don't care to take a readlock to look at the ids - * in inode here, because we can't hold it across the trans_reserve. - * If the IDs do change before we take the ilock, we're covered - * because the i_*dquot fields will get updated anyway. - */ - if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { - code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid, - ip->i_d.di_gid, fa->fsx_projid, - XFS_QMOPT_PQUOTA, &udqp, &gdqp); - if (code) - return code; - } - - /* - * For the other attributes, we acquire the inode lock and - * first do an error checking pass. - */ - tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); - code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); - if (code) - goto error_return; - - lock_flags = XFS_ILOCK_EXCL; - xfs_ilock(ip, lock_flags); - - /* - * CAP_FOWNER overrides the following restrictions: - * - * The user ID of the calling process must be equal - * to the file owner ID, except in cases where the - * CAP_FSETID capability is applicable. - */ - if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) { - code = XFS_ERROR(EPERM); - goto error_return; - } - - /* - * Do a quota reservation only if projid is actually going to change. - */ - if (mask & FSX_PROJID) { - if (XFS_IS_QUOTA_RUNNING(mp) && - XFS_IS_PQUOTA_ON(mp) && - xfs_get_projid(ip) != fa->fsx_projid) { - ASSERT(tp); - code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, - capable(CAP_FOWNER) ? - XFS_QMOPT_FORCE_RES : 0); - if (code) /* out of quota */ - goto error_return; - } - } - - if (mask & FSX_EXTSIZE) { - /* - * Can't change extent size if any extents are allocated. - */ - if (ip->i_d.di_nextents && - ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != - fa->fsx_extsize)) { - code = XFS_ERROR(EINVAL); /* EFBIG? */ - goto error_return; - } - - /* - * Extent size must be a multiple of the appropriate block - * size, if set at all. It must also be smaller than the - * maximum extent size supported by the filesystem. 
- * - * Also, for non-realtime files, limit the extent size hint to - * half the size of the AGs in the filesystem so alignment - * doesn't result in extents larger than an AG. - */ - if (fa->fsx_extsize != 0) { - xfs_extlen_t size; - xfs_fsblock_t extsize_fsb; - - extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); - if (extsize_fsb > MAXEXTLEN) { - code = XFS_ERROR(EINVAL); - goto error_return; - } - - if (XFS_IS_REALTIME_INODE(ip) || - ((mask & FSX_XFLAGS) && - (fa->fsx_xflags & XFS_XFLAG_REALTIME))) { - size = mp->m_sb.sb_rextsize << - mp->m_sb.sb_blocklog; - } else { - size = mp->m_sb.sb_blocksize; - if (extsize_fsb > mp->m_sb.sb_agblocks / 2) { - code = XFS_ERROR(EINVAL); - goto error_return; - } - } - - if (fa->fsx_extsize % size) { - code = XFS_ERROR(EINVAL); - goto error_return; - } - } - } - - - if (mask & FSX_XFLAGS) { - /* - * Can't change realtime flag if any extents are allocated. - */ - if ((ip->i_d.di_nextents || ip->i_delayed_blks) && - (XFS_IS_REALTIME_INODE(ip)) != - (fa->fsx_xflags & XFS_XFLAG_REALTIME)) { - code = XFS_ERROR(EINVAL); /* EFBIG? */ - goto error_return; - } - - /* - * If realtime flag is set then must have realtime data. - */ - if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) { - if ((mp->m_sb.sb_rblocks == 0) || - (mp->m_sb.sb_rextsize == 0) || - (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { - code = XFS_ERROR(EINVAL); - goto error_return; - } - } - - /* - * Can't modify an immutable/append-only file unless - * we have appropriate permission. - */ - if ((ip->i_d.di_flags & - (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) || - (fa->fsx_xflags & - (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && - !capable(CAP_LINUX_IMMUTABLE)) { - code = XFS_ERROR(EPERM); - goto error_return; - } - } - - xfs_trans_ijoin(tp, ip); - - /* - * Change file ownership. Must be the owner or privileged. - */ - if (mask & FSX_PROJID) { - /* - * CAP_FSETID overrides the following restrictions: - * - * The set-user-ID and set-group-ID bits of a file will be - * cleared upon successful return from chown() - */ - if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && - !capable(CAP_FSETID)) - ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); - - /* - * Change the ownerships and register quota modifications - * in the transaction. - */ - if (xfs_get_projid(ip) != fa->fsx_projid) { - if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { - olddquot = xfs_qm_vop_chown(tp, ip, - &ip->i_gdquot, gdqp); - } - xfs_set_projid(ip, fa->fsx_projid); - - /* - * We may have to rev the inode as well as - * the superblock version number since projids didn't - * exist before DINODE_VERSION_2 and SB_VERSION_NLINK. - */ - if (ip->i_d.di_version == 1) - xfs_bump_ino_vers2(tp, ip); - } - - } - - if (mask & FSX_EXTSIZE) - ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog; - if (mask & FSX_XFLAGS) { - xfs_set_diflags(ip, fa->fsx_xflags); - xfs_diflags_to_linux(ip); - } - - xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - - XFS_STATS_INC(xs_ig_attrchg); - - /* - * If this is a synchronous mount, make sure that the - * transaction goes to disk before returning to the user. - * This is slightly sub-optimal in that truncates require - * two sync transactions instead of one for wsync filesystems. - * One for the truncate and one for the timestamps since we - * don't want to change the timestamps unless we're sure the - * truncate worked. Truncates are less than 1% of the laddis - * mix so this probably isn't worth the trouble to optimize. 
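-	 *
-	 * (Illustration, not from the original source: "synchronous
-	 * mount" here means a filesystem mounted with -o wsync; the
-	 * XFS_MOUNT_WSYNC test below simply adds an
-	 * xfs_trans_set_sync(tp) before the commit, making the commit
-	 * wait for the log force.)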
- */ - if (mp->m_flags & XFS_MOUNT_WSYNC) - xfs_trans_set_sync(tp); - code = xfs_trans_commit(tp, 0); - xfs_iunlock(ip, lock_flags); - - /* - * Release any dquot(s) the inode had kept before chown. - */ - xfs_qm_dqrele(olddquot); - xfs_qm_dqrele(udqp); - xfs_qm_dqrele(gdqp); - - return code; - - error_return: - xfs_qm_dqrele(udqp); - xfs_qm_dqrele(gdqp); - xfs_trans_cancel(tp, 0); - if (lock_flags) - xfs_iunlock(ip, lock_flags); - return code; -} - -STATIC int -xfs_ioc_fssetxattr( - xfs_inode_t *ip, - struct file *filp, - void __user *arg) -{ - struct fsxattr fa; - unsigned int mask; - - if (copy_from_user(&fa, arg, sizeof(fa))) - return -EFAULT; - - mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID; - if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) - mask |= FSX_NONBLOCK; - - return -xfs_ioctl_setattr(ip, &fa, mask); -} - -STATIC int -xfs_ioc_getxflags( - xfs_inode_t *ip, - void __user *arg) -{ - unsigned int flags; - - flags = xfs_di2lxflags(ip->i_d.di_flags); - if (copy_to_user(arg, &flags, sizeof(flags))) - return -EFAULT; - return 0; -} - -STATIC int -xfs_ioc_setxflags( - xfs_inode_t *ip, - struct file *filp, - void __user *arg) -{ - struct fsxattr fa; - unsigned int flags; - unsigned int mask; - - if (copy_from_user(&flags, arg, sizeof(flags))) - return -EFAULT; - - if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ - FS_NOATIME_FL | FS_NODUMP_FL | \ - FS_SYNC_FL)) - return -EOPNOTSUPP; - - mask = FSX_XFLAGS; - if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) - mask |= FSX_NONBLOCK; - fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); - - return -xfs_ioctl_setattr(ip, &fa, mask); -} - -STATIC int -xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full) -{ - struct getbmap __user *base = *ap; - - /* copy only getbmap portion (not getbmapx) */ - if (copy_to_user(base, bmv, sizeof(struct getbmap))) - return XFS_ERROR(EFAULT); - - *ap += sizeof(struct getbmap); - return 0; -} - -STATIC int -xfs_ioc_getbmap( - struct xfs_inode *ip, - int ioflags, - unsigned int cmd, - void __user *arg) -{ - struct getbmapx bmx; - int error; - - if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) - return -XFS_ERROR(EFAULT); - - if (bmx.bmv_count < 2) - return -XFS_ERROR(EINVAL); - - bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); - if (ioflags & IO_INVIS) - bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ; - - error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, - (struct getbmap *)arg+1); - if (error) - return -error; - - /* copy back header - only size of getbmap */ - if (copy_to_user(arg, &bmx, sizeof(struct getbmap))) - return -XFS_ERROR(EFAULT); - return 0; -} - -STATIC int -xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full) -{ - struct getbmapx __user *base = *ap; - - if (copy_to_user(base, bmv, sizeof(struct getbmapx))) - return XFS_ERROR(EFAULT); - - *ap += sizeof(struct getbmapx); - return 0; -} - -STATIC int -xfs_ioc_getbmapx( - struct xfs_inode *ip, - void __user *arg) -{ - struct getbmapx bmx; - int error; - - if (copy_from_user(&bmx, arg, sizeof(bmx))) - return -XFS_ERROR(EFAULT); - - if (bmx.bmv_count < 2) - return -XFS_ERROR(EINVAL); - - if (bmx.bmv_iflags & (~BMV_IF_VALID)) - return -XFS_ERROR(EINVAL); - - error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, - (struct getbmapx *)arg+1); - if (error) - return -error; - - /* copy back header */ - if (copy_to_user(arg, &bmx, sizeof(struct getbmapx))) - return -XFS_ERROR(EFAULT); - - return 0; -} - -/* - * Note: some of the ioctl's return positive numbers as a - * byte count indicating success, such as readlink_by_handle. 
- * So we don't "sign flip" like most other routines. This means - * true errors need to be returned as a negative value. - */ -long -xfs_file_ioctl( - struct file *filp, - unsigned int cmd, - unsigned long p) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - void __user *arg = (void __user *)p; - int ioflags = 0; - int error; - - if (filp->f_mode & FMODE_NOCMTIME) - ioflags |= IO_INVIS; - - trace_xfs_file_ioctl(ip); - - switch (cmd) { - case FITRIM: - return xfs_ioc_trim(mp, arg); - case XFS_IOC_ALLOCSP: - case XFS_IOC_FREESP: - case XFS_IOC_RESVSP: - case XFS_IOC_UNRESVSP: - case XFS_IOC_ALLOCSP64: - case XFS_IOC_FREESP64: - case XFS_IOC_RESVSP64: - case XFS_IOC_UNRESVSP64: - case XFS_IOC_ZERO_RANGE: { - xfs_flock64_t bf; - - if (copy_from_user(&bf, arg, sizeof(bf))) - return -XFS_ERROR(EFAULT); - return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); - } - case XFS_IOC_DIOINFO: { - struct dioattr da; - xfs_buftarg_t *target = - XFS_IS_REALTIME_INODE(ip) ? - mp->m_rtdev_targp : mp->m_ddev_targp; - - da.d_mem = da.d_miniosz = 1 << target->bt_sshift; - da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); - - if (copy_to_user(arg, &da, sizeof(da))) - return -XFS_ERROR(EFAULT); - return 0; - } - - case XFS_IOC_FSBULKSTAT_SINGLE: - case XFS_IOC_FSBULKSTAT: - case XFS_IOC_FSINUMBERS: - return xfs_ioc_bulkstat(mp, cmd, arg); - - case XFS_IOC_FSGEOMETRY_V1: - return xfs_ioc_fsgeometry_v1(mp, arg); - - case XFS_IOC_FSGEOMETRY: - return xfs_ioc_fsgeometry(mp, arg); - - case XFS_IOC_GETVERSION: - return put_user(inode->i_generation, (int __user *)arg); - - case XFS_IOC_FSGETXATTR: - return xfs_ioc_fsgetxattr(ip, 0, arg); - case XFS_IOC_FSGETXATTRA: - return xfs_ioc_fsgetxattr(ip, 1, arg); - case XFS_IOC_FSSETXATTR: - return xfs_ioc_fssetxattr(ip, filp, arg); - case XFS_IOC_GETXFLAGS: - return xfs_ioc_getxflags(ip, arg); - case XFS_IOC_SETXFLAGS: - return xfs_ioc_setxflags(ip, filp, arg); - - case XFS_IOC_FSSETDM: { - struct fsdmidata dmi; - - if (copy_from_user(&dmi, arg, sizeof(dmi))) - return -XFS_ERROR(EFAULT); - - error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, - dmi.fsd_dmstate); - return -error; - } - - case XFS_IOC_GETBMAP: - case XFS_IOC_GETBMAPA: - return xfs_ioc_getbmap(ip, ioflags, cmd, arg); - - case XFS_IOC_GETBMAPX: - return xfs_ioc_getbmapx(ip, arg); - - case XFS_IOC_FD_TO_HANDLE: - case XFS_IOC_PATH_TO_HANDLE: - case XFS_IOC_PATH_TO_FSHANDLE: { - xfs_fsop_handlereq_t hreq; - - if (copy_from_user(&hreq, arg, sizeof(hreq))) - return -XFS_ERROR(EFAULT); - return xfs_find_handle(cmd, &hreq); - } - case XFS_IOC_OPEN_BY_HANDLE: { - xfs_fsop_handlereq_t hreq; - - if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) - return -XFS_ERROR(EFAULT); - return xfs_open_by_handle(filp, &hreq); - } - case XFS_IOC_FSSETDM_BY_HANDLE: - return xfs_fssetdm_by_handle(filp, arg); - - case XFS_IOC_READLINK_BY_HANDLE: { - xfs_fsop_handlereq_t hreq; - - if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) - return -XFS_ERROR(EFAULT); - return xfs_readlink_by_handle(filp, &hreq); - } - case XFS_IOC_ATTRLIST_BY_HANDLE: - return xfs_attrlist_by_handle(filp, arg); - - case XFS_IOC_ATTRMULTI_BY_HANDLE: - return xfs_attrmulti_by_handle(filp, arg); - - case XFS_IOC_SWAPEXT: { - struct xfs_swapext sxp; - - if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) - return -XFS_ERROR(EFAULT); - error = xfs_swapext(&sxp); - return -error; - } - - case XFS_IOC_FSCOUNTS: { - xfs_fsop_counts_t out; - - error = xfs_fs_counts(mp, 
&out); - if (error) - return -error; - - if (copy_to_user(arg, &out, sizeof(out))) - return -XFS_ERROR(EFAULT); - return 0; - } - - case XFS_IOC_SET_RESBLKS: { - xfs_fsop_resblks_t inout; - __uint64_t in; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (mp->m_flags & XFS_MOUNT_RDONLY) - return -XFS_ERROR(EROFS); - - if (copy_from_user(&inout, arg, sizeof(inout))) - return -XFS_ERROR(EFAULT); - - /* input parameter is passed in resblks field of structure */ - in = inout.resblks; - error = xfs_reserve_blocks(mp, &in, &inout); - if (error) - return -error; - - if (copy_to_user(arg, &inout, sizeof(inout))) - return -XFS_ERROR(EFAULT); - return 0; - } - - case XFS_IOC_GET_RESBLKS: { - xfs_fsop_resblks_t out; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - error = xfs_reserve_blocks(mp, NULL, &out); - if (error) - return -error; - - if (copy_to_user(arg, &out, sizeof(out))) - return -XFS_ERROR(EFAULT); - - return 0; - } - - case XFS_IOC_FSGROWFSDATA: { - xfs_growfs_data_t in; - - if (copy_from_user(&in, arg, sizeof(in))) - return -XFS_ERROR(EFAULT); - - error = xfs_growfs_data(mp, &in); - return -error; - } - - case XFS_IOC_FSGROWFSLOG: { - xfs_growfs_log_t in; - - if (copy_from_user(&in, arg, sizeof(in))) - return -XFS_ERROR(EFAULT); - - error = xfs_growfs_log(mp, &in); - return -error; - } - - case XFS_IOC_FSGROWFSRT: { - xfs_growfs_rt_t in; - - if (copy_from_user(&in, arg, sizeof(in))) - return -XFS_ERROR(EFAULT); - - error = xfs_growfs_rt(mp, &in); - return -error; - } - - case XFS_IOC_GOINGDOWN: { - __uint32_t in; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (get_user(in, (__uint32_t __user *)arg)) - return -XFS_ERROR(EFAULT); - - error = xfs_fs_goingdown(mp, in); - return -error; - } - - case XFS_IOC_ERROR_INJECTION: { - xfs_error_injection_t in; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (copy_from_user(&in, arg, sizeof(in))) - return -XFS_ERROR(EFAULT); - - error = xfs_errortag_add(in.errtag, mp); - return -error; - } - - case XFS_IOC_ERROR_CLEARALL: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - error = xfs_errortag_clearall(mp, 1); - return -error; - - default: - return -ENOTTY; - } -} diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h deleted file mode 100644 index d56173b..0000000 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2008 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_IOCTL_H__ -#define __XFS_IOCTL_H__ - -extern int -xfs_ioc_space( - struct xfs_inode *ip, - struct inode *inode, - struct file *filp, - int ioflags, - unsigned int cmd, - xfs_flock64_t *bf); - -extern int -xfs_find_handle( - unsigned int cmd, - xfs_fsop_handlereq_t *hreq); - -extern int -xfs_open_by_handle( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq); - -extern int -xfs_readlink_by_handle( - struct file *parfilp, - xfs_fsop_handlereq_t *hreq); - -extern int -xfs_attrmulti_attr_get( - struct inode *inode, - unsigned char *name, - unsigned char __user *ubuf, - __uint32_t *len, - __uint32_t flags); - -extern int -xfs_attrmulti_attr_set( - struct inode *inode, - unsigned char *name, - const unsigned char __user *ubuf, - __uint32_t len, - __uint32_t flags); - -extern int -xfs_attrmulti_attr_remove( - struct inode *inode, - unsigned char *name, - __uint32_t flags); - -extern struct dentry * -xfs_handle_to_dentry( - struct file *parfilp, - void __user *uhandle, - u32 hlen); - -extern long -xfs_file_ioctl( - struct file *filp, - unsigned int cmd, - unsigned long p); - -extern long -xfs_file_compat_ioctl( - struct file *file, - unsigned int cmd, - unsigned long arg); - -#endif diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c deleted file mode 100644 index 54e623b..0000000 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ /dev/null @@ -1,672 +0,0 @@ -/* - * Copyright (c) 2004-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include <linux/compat.h> -#include <linux/ioctl.h> -#include <linux/mount.h> -#include <linux/slab.h> -#include <asm/uaccess.h> -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_vnode.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_itable.h" -#include "xfs_error.h" -#include "xfs_dfrag.h" -#include "xfs_vnodeops.h" -#include "xfs_fsops.h" -#include "xfs_alloc.h" -#include "xfs_rtalloc.h" -#include "xfs_attr.h" -#include "xfs_ioctl.h" -#include "xfs_ioctl32.h" -#include "xfs_trace.h" - -#define _NATIVE_IOC(cmd, type) \ - _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) - -#ifdef BROKEN_X86_ALIGNMENT -STATIC int -xfs_compat_flock64_copyin( - xfs_flock64_t *bf, - compat_xfs_flock64_t __user *arg32) -{ - if (get_user(bf->l_type, &arg32->l_type) || - get_user(bf->l_whence, &arg32->l_whence) || - get_user(bf->l_start, &arg32->l_start) || - get_user(bf->l_len, &arg32->l_len) || - get_user(bf->l_sysid, &arg32->l_sysid) || - get_user(bf->l_pid, &arg32->l_pid) || - copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32))) - return -XFS_ERROR(EFAULT); - return 0; -} - -STATIC int -xfs_compat_ioc_fsgeometry_v1( - struct xfs_mount *mp, - compat_xfs_fsop_geom_v1_t __user *arg32) -{ - xfs_fsop_geom_t fsgeo; - int error; - - error = xfs_fs_geometry(mp, &fsgeo, 3); - if (error) - return -error; - /* The 32-bit variant simply has some padding at the end */ - if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1))) - return -XFS_ERROR(EFAULT); - return 0; -} - -STATIC int -xfs_compat_growfs_data_copyin( - struct xfs_growfs_data *in, - compat_xfs_growfs_data_t __user *arg32) -{ - if (get_user(in->newblocks, &arg32->newblocks) || - get_user(in->imaxpct, &arg32->imaxpct)) - return -XFS_ERROR(EFAULT); - return 0; -} - -STATIC int -xfs_compat_growfs_rt_copyin( - struct xfs_growfs_rt *in, - compat_xfs_growfs_rt_t __user *arg32) -{ - if (get_user(in->newblocks, &arg32->newblocks) || - get_user(in->extsize, &arg32->extsize)) - return -XFS_ERROR(EFAULT); - return 0; -} - -STATIC int -xfs_inumbers_fmt_compat( - void __user *ubuffer, - const xfs_inogrp_t *buffer, - long count, - long *written) -{ - compat_xfs_inogrp_t __user *p32 = ubuffer; - long i; - - for (i = 0; i < count; i++) { - if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) || - put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) || - put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask)) - return -XFS_ERROR(EFAULT); - } - *written = count * sizeof(*p32); - return 0; -} - -#else -#define xfs_inumbers_fmt_compat xfs_inumbers_fmt -#endif /* BROKEN_X86_ALIGNMENT */ - -STATIC int -xfs_ioctl32_bstime_copyin( - xfs_bstime_t *bstime, - compat_xfs_bstime_t __user *bstime32) -{ - compat_time_t sec32; /* tv_sec differs on 64 vs. 
32 */ - - if (get_user(sec32, &bstime32->tv_sec) || - get_user(bstime->tv_nsec, &bstime32->tv_nsec)) - return -XFS_ERROR(EFAULT); - bstime->tv_sec = sec32; - return 0; -} - -/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */ -STATIC int -xfs_ioctl32_bstat_copyin( - xfs_bstat_t *bstat, - compat_xfs_bstat_t __user *bstat32) -{ - if (get_user(bstat->bs_ino, &bstat32->bs_ino) || - get_user(bstat->bs_mode, &bstat32->bs_mode) || - get_user(bstat->bs_nlink, &bstat32->bs_nlink) || - get_user(bstat->bs_uid, &bstat32->bs_uid) || - get_user(bstat->bs_gid, &bstat32->bs_gid) || - get_user(bstat->bs_rdev, &bstat32->bs_rdev) || - get_user(bstat->bs_blksize, &bstat32->bs_blksize) || - get_user(bstat->bs_size, &bstat32->bs_size) || - xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) || - xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) || - xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) || - get_user(bstat->bs_blocks, &bstat32->bs_blocks) || - get_user(bstat->bs_xflags, &bstat32->bs_xflags) || - get_user(bstat->bs_extsize, &bstat32->bs_extsize) || - get_user(bstat->bs_extents, &bstat32->bs_extents) || - get_user(bstat->bs_gen, &bstat32->bs_gen) || - get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) || - get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) || - get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || - get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || - get_user(bstat->bs_aextents, &bstat32->bs_aextents)) - return -XFS_ERROR(EFAULT); - return 0; -} - -/* XFS_IOC_FSBULKSTAT and friends */ - -STATIC int -xfs_bstime_store_compat( - compat_xfs_bstime_t __user *p32, - const xfs_bstime_t *p) -{ - __s32 sec32; - - sec32 = p->tv_sec; - if (put_user(sec32, &p32->tv_sec) || - put_user(p->tv_nsec, &p32->tv_nsec)) - return -XFS_ERROR(EFAULT); - return 0; -} - -/* Return 0 on success or positive error (to xfs_bulkstat()) */ -STATIC int -xfs_bulkstat_one_fmt_compat( - void __user *ubuffer, - int ubsize, - int *ubused, - const xfs_bstat_t *buffer) -{ - compat_xfs_bstat_t __user *p32 = ubuffer; - - if (ubsize < sizeof(*p32)) - return XFS_ERROR(ENOMEM); - - if (put_user(buffer->bs_ino, &p32->bs_ino) || - put_user(buffer->bs_mode, &p32->bs_mode) || - put_user(buffer->bs_nlink, &p32->bs_nlink) || - put_user(buffer->bs_uid, &p32->bs_uid) || - put_user(buffer->bs_gid, &p32->bs_gid) || - put_user(buffer->bs_rdev, &p32->bs_rdev) || - put_user(buffer->bs_blksize, &p32->bs_blksize) || - put_user(buffer->bs_size, &p32->bs_size) || - xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) || - xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) || - xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) || - put_user(buffer->bs_blocks, &p32->bs_blocks) || - put_user(buffer->bs_xflags, &p32->bs_xflags) || - put_user(buffer->bs_extsize, &p32->bs_extsize) || - put_user(buffer->bs_extents, &p32->bs_extents) || - put_user(buffer->bs_gen, &p32->bs_gen) || - put_user(buffer->bs_projid, &p32->bs_projid) || - put_user(buffer->bs_projid_hi, &p32->bs_projid_hi) || - put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || - put_user(buffer->bs_dmstate, &p32->bs_dmstate) || - put_user(buffer->bs_aextents, &p32->bs_aextents)) - return XFS_ERROR(EFAULT); - if (ubused) - *ubused = sizeof(*p32); - return 0; -} - -STATIC int -xfs_bulkstat_one_compat( - xfs_mount_t *mp, /* mount point for filesystem */ - xfs_ino_t ino, /* inode number to get data for */ - void __user *buffer, /* buffer to place output in */ - int ubsize, /* size of buffer */ - int 
*ubused, /* bytes used by me */ - int *stat) /* BULKSTAT_RV_... */ -{ - return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, - xfs_bulkstat_one_fmt_compat, - ubused, stat); -} - -/* copied from xfs_ioctl.c */ -STATIC int -xfs_compat_ioc_bulkstat( - xfs_mount_t *mp, - unsigned int cmd, - compat_xfs_fsop_bulkreq_t __user *p32) -{ - u32 addr; - xfs_fsop_bulkreq_t bulkreq; - int count; /* # of records returned */ - xfs_ino_t inlast; /* last inode number */ - int done; - int error; - - /* done = 1 if there are more stats to get and if bulkstat */ - /* should be called again (unused here, but used in dmapi) */ - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - - if (XFS_FORCED_SHUTDOWN(mp)) - return -XFS_ERROR(EIO); - - if (get_user(addr, &p32->lastip)) - return -XFS_ERROR(EFAULT); - bulkreq.lastip = compat_ptr(addr); - if (get_user(bulkreq.icount, &p32->icount) || - get_user(addr, &p32->ubuffer)) - return -XFS_ERROR(EFAULT); - bulkreq.ubuffer = compat_ptr(addr); - if (get_user(addr, &p32->ocount)) - return -XFS_ERROR(EFAULT); - bulkreq.ocount = compat_ptr(addr); - - if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) - return -XFS_ERROR(EFAULT); - - if ((count = bulkreq.icount) <= 0) - return -XFS_ERROR(EINVAL); - - if (bulkreq.ubuffer == NULL) - return -XFS_ERROR(EINVAL); - - if (cmd == XFS_IOC_FSINUMBERS_32) { - error = xfs_inumbers(mp, &inlast, &count, - bulkreq.ubuffer, xfs_inumbers_fmt_compat); - } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) { - int res; - - error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, - sizeof(compat_xfs_bstat_t), 0, &res); - } else if (cmd == XFS_IOC_FSBULKSTAT_32) { - error = xfs_bulkstat(mp, &inlast, &count, - xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), - bulkreq.ubuffer, &done); - } else - error = XFS_ERROR(EINVAL); - if (error) - return -error; - - if (bulkreq.ocount != NULL) { - if (copy_to_user(bulkreq.lastip, &inlast, - sizeof(xfs_ino_t))) - return -XFS_ERROR(EFAULT); - - if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) - return -XFS_ERROR(EFAULT); - } - - return 0; -} - -STATIC int -xfs_compat_handlereq_copyin( - xfs_fsop_handlereq_t *hreq, - compat_xfs_fsop_handlereq_t __user *arg32) -{ - compat_xfs_fsop_handlereq_t hreq32; - - if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) - return -XFS_ERROR(EFAULT); - - hreq->fd = hreq32.fd; - hreq->path = compat_ptr(hreq32.path); - hreq->oflags = hreq32.oflags; - hreq->ihandle = compat_ptr(hreq32.ihandle); - hreq->ihandlen = hreq32.ihandlen; - hreq->ohandle = compat_ptr(hreq32.ohandle); - hreq->ohandlen = compat_ptr(hreq32.ohandlen); - - return 0; -} - -STATIC struct dentry * -xfs_compat_handlereq_to_dentry( - struct file *parfilp, - compat_xfs_fsop_handlereq_t *hreq) -{ - return xfs_handle_to_dentry(parfilp, - compat_ptr(hreq->ihandle), hreq->ihandlen); -} - -STATIC int -xfs_compat_attrlist_by_handle( - struct file *parfilp, - void __user *arg) -{ - int error; - attrlist_cursor_kern_t *cursor; - compat_xfs_fsop_attrlist_handlereq_t al_hreq; - struct dentry *dentry; - char *kbuf; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - if (copy_from_user(&al_hreq, arg, - sizeof(compat_xfs_fsop_attrlist_handlereq_t))) - return -XFS_ERROR(EFAULT); - if (al_hreq.buflen > XATTR_LIST_MAX) - return -XFS_ERROR(EINVAL); - - /* - * Reject flags, only allow namespaces. 
- */ - if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) - return -XFS_ERROR(EINVAL); - - dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - error = -ENOMEM; - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); - if (!kbuf) - goto out_dput; - - cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; - error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, - al_hreq.flags, cursor); - if (error) - goto out_kfree; - - if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen)) - error = -EFAULT; - - out_kfree: - kfree(kbuf); - out_dput: - dput(dentry); - return error; -} - -STATIC int -xfs_compat_attrmulti_by_handle( - struct file *parfilp, - void __user *arg) -{ - int error; - compat_xfs_attr_multiop_t *ops; - compat_xfs_fsop_attrmulti_handlereq_t am_hreq; - struct dentry *dentry; - unsigned int i, size; - unsigned char *attr_name; - - if (!capable(CAP_SYS_ADMIN)) - return -XFS_ERROR(EPERM); - if (copy_from_user(&am_hreq, arg, - sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) - return -XFS_ERROR(EFAULT); - - /* overflow check */ - if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t)) - return -E2BIG; - - dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - error = E2BIG; - size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); - if (!size || size > 16 * PAGE_SIZE) - goto out_dput; - - ops = memdup_user(compat_ptr(am_hreq.ops), size); - if (IS_ERR(ops)) { - error = PTR_ERR(ops); - goto out_dput; - } - - attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); - if (!attr_name) - goto out_kfree_ops; - - error = 0; - for (i = 0; i < am_hreq.opcount; i++) { - ops[i].am_error = strncpy_from_user((char *)attr_name, - compat_ptr(ops[i].am_attrname), - MAXNAMELEN); - if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) - error = -ERANGE; - if (ops[i].am_error < 0) - break; - - switch (ops[i].am_opcode) { - case ATTR_OP_GET: - ops[i].am_error = xfs_attrmulti_attr_get( - dentry->d_inode, attr_name, - compat_ptr(ops[i].am_attrvalue), - &ops[i].am_length, ops[i].am_flags); - break; - case ATTR_OP_SET: - ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); - if (ops[i].am_error) - break; - ops[i].am_error = xfs_attrmulti_attr_set( - dentry->d_inode, attr_name, - compat_ptr(ops[i].am_attrvalue), - ops[i].am_length, ops[i].am_flags); - mnt_drop_write(parfilp->f_path.mnt); - break; - case ATTR_OP_REMOVE: - ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); - if (ops[i].am_error) - break; - ops[i].am_error = xfs_attrmulti_attr_remove( - dentry->d_inode, attr_name, - ops[i].am_flags); - mnt_drop_write(parfilp->f_path.mnt); - break; - default: - ops[i].am_error = EINVAL; - } - } - - if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) - error = XFS_ERROR(EFAULT); - - kfree(attr_name); - out_kfree_ops: - kfree(ops); - out_dput: - dput(dentry); - return -error; -} - -STATIC int -xfs_compat_fssetdm_by_handle( - struct file *parfilp, - void __user *arg) -{ - int error; - struct fsdmidata fsd; - compat_xfs_fsop_setdm_handlereq_t dmhreq; - struct dentry *dentry; - - if (!capable(CAP_MKNOD)) - return -XFS_ERROR(EPERM); - if (copy_from_user(&dmhreq, arg, - sizeof(compat_xfs_fsop_setdm_handlereq_t))) - return -XFS_ERROR(EFAULT); - - dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { - error = -XFS_ERROR(EPERM); - goto out; - } - - if 
(copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) { - error = -XFS_ERROR(EFAULT); - goto out; - } - - error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, - fsd.fsd_dmstate); - -out: - dput(dentry); - return error; -} - -long -xfs_file_compat_ioctl( - struct file *filp, - unsigned cmd, - unsigned long p) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - void __user *arg = (void __user *)p; - int ioflags = 0; - int error; - - if (filp->f_mode & FMODE_NOCMTIME) - ioflags |= IO_INVIS; - - trace_xfs_file_compat_ioctl(ip); - - switch (cmd) { - /* No size or alignment issues on any arch */ - case XFS_IOC_DIOINFO: - case XFS_IOC_FSGEOMETRY: - case XFS_IOC_FSGETXATTR: - case XFS_IOC_FSSETXATTR: - case XFS_IOC_FSGETXATTRA: - case XFS_IOC_FSSETDM: - case XFS_IOC_GETBMAP: - case XFS_IOC_GETBMAPA: - case XFS_IOC_GETBMAPX: - case XFS_IOC_FSCOUNTS: - case XFS_IOC_SET_RESBLKS: - case XFS_IOC_GET_RESBLKS: - case XFS_IOC_FSGROWFSLOG: - case XFS_IOC_GOINGDOWN: - case XFS_IOC_ERROR_INJECTION: - case XFS_IOC_ERROR_CLEARALL: - return xfs_file_ioctl(filp, cmd, p); -#ifndef BROKEN_X86_ALIGNMENT - /* These are handled fine if no alignment issues */ - case XFS_IOC_ALLOCSP: - case XFS_IOC_FREESP: - case XFS_IOC_RESVSP: - case XFS_IOC_UNRESVSP: - case XFS_IOC_ALLOCSP64: - case XFS_IOC_FREESP64: - case XFS_IOC_RESVSP64: - case XFS_IOC_UNRESVSP64: - case XFS_IOC_FSGEOMETRY_V1: - case XFS_IOC_FSGROWFSDATA: - case XFS_IOC_FSGROWFSRT: - case XFS_IOC_ZERO_RANGE: - return xfs_file_ioctl(filp, cmd, p); -#else - case XFS_IOC_ALLOCSP_32: - case XFS_IOC_FREESP_32: - case XFS_IOC_ALLOCSP64_32: - case XFS_IOC_FREESP64_32: - case XFS_IOC_RESVSP_32: - case XFS_IOC_UNRESVSP_32: - case XFS_IOC_RESVSP64_32: - case XFS_IOC_UNRESVSP64_32: - case XFS_IOC_ZERO_RANGE_32: { - struct xfs_flock64 bf; - - if (xfs_compat_flock64_copyin(&bf, arg)) - return -XFS_ERROR(EFAULT); - cmd = _NATIVE_IOC(cmd, struct xfs_flock64); - return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); - } - case XFS_IOC_FSGEOMETRY_V1_32: - return xfs_compat_ioc_fsgeometry_v1(mp, arg); - case XFS_IOC_FSGROWFSDATA_32: { - struct xfs_growfs_data in; - - if (xfs_compat_growfs_data_copyin(&in, arg)) - return -XFS_ERROR(EFAULT); - error = xfs_growfs_data(mp, &in); - return -error; - } - case XFS_IOC_FSGROWFSRT_32: { - struct xfs_growfs_rt in; - - if (xfs_compat_growfs_rt_copyin(&in, arg)) - return -XFS_ERROR(EFAULT); - error = xfs_growfs_rt(mp, &in); - return -error; - } -#endif - /* long changes size, but xfs only copies out 32 bits */ - case XFS_IOC_GETXFLAGS_32: - case XFS_IOC_SETXFLAGS_32: - case XFS_IOC_GETVERSION_32: - cmd = _NATIVE_IOC(cmd, long); - return xfs_file_ioctl(filp, cmd, p); - case XFS_IOC_SWAPEXT_32: { - struct xfs_swapext sxp; - struct compat_xfs_swapext __user *sxu = arg; - - /* Bulk copy in up to the sx_stat field, then copy bstat */ - if (copy_from_user(&sxp, sxu, - offsetof(struct xfs_swapext, sx_stat)) || - xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) - return -XFS_ERROR(EFAULT); - error = xfs_swapext(&sxp); - return -error; - } - case XFS_IOC_FSBULKSTAT_32: - case XFS_IOC_FSBULKSTAT_SINGLE_32: - case XFS_IOC_FSINUMBERS_32: - return xfs_compat_ioc_bulkstat(mp, cmd, arg); - case XFS_IOC_FD_TO_HANDLE_32: - case XFS_IOC_PATH_TO_HANDLE_32: - case XFS_IOC_PATH_TO_FSHANDLE_32: { - struct xfs_fsop_handlereq hreq; - - if (xfs_compat_handlereq_copyin(&hreq, arg)) - return -XFS_ERROR(EFAULT); - cmd = _NATIVE_IOC(cmd, struct 
xfs_fsop_handlereq); - return xfs_find_handle(cmd, &hreq); - } - case XFS_IOC_OPEN_BY_HANDLE_32: { - struct xfs_fsop_handlereq hreq; - - if (xfs_compat_handlereq_copyin(&hreq, arg)) - return -XFS_ERROR(EFAULT); - return xfs_open_by_handle(filp, &hreq); - } - case XFS_IOC_READLINK_BY_HANDLE_32: { - struct xfs_fsop_handlereq hreq; - - if (xfs_compat_handlereq_copyin(&hreq, arg)) - return -XFS_ERROR(EFAULT); - return xfs_readlink_by_handle(filp, &hreq); - } - case XFS_IOC_ATTRLIST_BY_HANDLE_32: - return xfs_compat_attrlist_by_handle(filp, arg); - case XFS_IOC_ATTRMULTI_BY_HANDLE_32: - return xfs_compat_attrmulti_by_handle(filp, arg); - case XFS_IOC_FSSETDM_BY_HANDLE_32: - return xfs_compat_fssetdm_by_handle(filp, arg); - default: - return -XFS_ERROR(ENOIOCTLCMD); - } -} diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h deleted file mode 100644 index 80f4060..0000000 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Copyright (c) 2004-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_IOCTL32_H__ -#define __XFS_IOCTL32_H__ - -#include <linux/compat.h> - -/* - * on 32-bit arches, ioctl argument structures may have different sizes - * and/or alignment. We define compat structures which match the - * 32-bit sizes/alignments here, and their associated ioctl numbers. - * - * xfs_ioctl32.c contains routines to copy these structures in and out. - */ - -/* stock kernel-level ioctls we support */ -#define XFS_IOC_GETXFLAGS_32 FS_IOC32_GETFLAGS -#define XFS_IOC_SETXFLAGS_32 FS_IOC32_SETFLAGS -#define XFS_IOC_GETVERSION_32 FS_IOC32_GETVERSION - -/* - * On intel, even if sizes match, alignment and/or padding may differ. 
- */ -#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) -#define BROKEN_X86_ALIGNMENT -#define __compat_packed __attribute__((packed)) -#else -#define __compat_packed -#endif - -typedef struct compat_xfs_bstime { - compat_time_t tv_sec; /* seconds */ - __s32 tv_nsec; /* and nanoseconds */ -} compat_xfs_bstime_t; - -typedef struct compat_xfs_bstat { - __u64 bs_ino; /* inode number */ - __u16 bs_mode; /* type and mode */ - __u16 bs_nlink; /* number of links */ - __u32 bs_uid; /* user id */ - __u32 bs_gid; /* group id */ - __u32 bs_rdev; /* device value */ - __s32 bs_blksize; /* block size */ - __s64 bs_size; /* file size */ - compat_xfs_bstime_t bs_atime; /* access time */ - compat_xfs_bstime_t bs_mtime; /* modify time */ - compat_xfs_bstime_t bs_ctime; /* inode change time */ - int64_t bs_blocks; /* number of blocks */ - __u32 bs_xflags; /* extended flags */ - __s32 bs_extsize; /* extent size */ - __s32 bs_extents; /* number of extents */ - __u32 bs_gen; /* generation count */ - __u16 bs_projid_lo; /* lower part of project id */ -#define bs_projid bs_projid_lo /* (previously just bs_projid) */ - __u16 bs_projid_hi; /* high part of project id */ - unsigned char bs_pad[12]; /* pad space, unused */ - __u32 bs_dmevmask; /* DMIG event mask */ - __u16 bs_dmstate; /* DMIG state info */ - __u16 bs_aextents; /* attribute number of extents */ -} __compat_packed compat_xfs_bstat_t; - -typedef struct compat_xfs_fsop_bulkreq { - compat_uptr_t lastip; /* last inode # pointer */ - __s32 icount; /* count of entries in buffer */ - compat_uptr_t ubuffer; /* user buffer for inode desc. */ - compat_uptr_t ocount; /* output count pointer */ -} compat_xfs_fsop_bulkreq_t; - -#define XFS_IOC_FSBULKSTAT_32 \ - _IOWR('X', 101, struct compat_xfs_fsop_bulkreq) -#define XFS_IOC_FSBULKSTAT_SINGLE_32 \ - _IOWR('X', 102, struct compat_xfs_fsop_bulkreq) -#define XFS_IOC_FSINUMBERS_32 \ - _IOWR('X', 103, struct compat_xfs_fsop_bulkreq) - -typedef struct compat_xfs_fsop_handlereq { - __u32 fd; /* fd for FD_TO_HANDLE */ - compat_uptr_t path; /* user pathname */ - __u32 oflags; /* open flags */ - compat_uptr_t ihandle; /* user supplied handle */ - __u32 ihandlen; /* user supplied length */ - compat_uptr_t ohandle; /* user buffer for handle */ - compat_uptr_t ohandlen; /* user buffer length */ -} compat_xfs_fsop_handlereq_t; - -#define XFS_IOC_PATH_TO_FSHANDLE_32 \ - _IOWR('X', 104, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_PATH_TO_HANDLE_32 \ - _IOWR('X', 105, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_FD_TO_HANDLE_32 \ - _IOWR('X', 106, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_OPEN_BY_HANDLE_32 \ - _IOWR('X', 107, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_READLINK_BY_HANDLE_32 \ - _IOWR('X', 108, struct compat_xfs_fsop_handlereq) - -/* The bstat field in the swapext struct needs translation */ -typedef struct compat_xfs_swapext { - __int64_t sx_version; /* version */ - __int64_t sx_fdtarget; /* fd of target file */ - __int64_t sx_fdtmp; /* fd of tmp file */ - xfs_off_t sx_offset; /* offset into file */ - xfs_off_t sx_length; /* leng from offset */ - char sx_pad[16]; /* pad space, unused */ - compat_xfs_bstat_t sx_stat; /* stat of target b4 copy */ -} __compat_packed compat_xfs_swapext_t; - -#define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext) - -typedef struct compat_xfs_fsop_attrlist_handlereq { - struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */ - struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */ - __u32 flags; /* which namespace to 
use */ - __u32 buflen; /* length of buffer supplied */ - compat_uptr_t buffer; /* returned names */ -} __compat_packed compat_xfs_fsop_attrlist_handlereq_t; - -/* Note: actually this is read/write */ -#define XFS_IOC_ATTRLIST_BY_HANDLE_32 \ - _IOW('X', 122, struct compat_xfs_fsop_attrlist_handlereq) - -/* am_opcodes defined in xfs_fs.h */ -typedef struct compat_xfs_attr_multiop { - __u32 am_opcode; - __s32 am_error; - compat_uptr_t am_attrname; - compat_uptr_t am_attrvalue; - __u32 am_length; - __u32 am_flags; -} compat_xfs_attr_multiop_t; - -typedef struct compat_xfs_fsop_attrmulti_handlereq { - struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */ - __u32 opcount;/* count of following multiop */ - /* ptr to compat_xfs_attr_multiop */ - compat_uptr_t ops; /* attr_multi data */ -} compat_xfs_fsop_attrmulti_handlereq_t; - -#define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \ - _IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq) - -typedef struct compat_xfs_fsop_setdm_handlereq { - struct compat_xfs_fsop_handlereq hreq; /* handle information */ - /* ptr to struct fsdmidata */ - compat_uptr_t data; /* DMAPI data */ -} compat_xfs_fsop_setdm_handlereq_t; - -#define XFS_IOC_FSSETDM_BY_HANDLE_32 \ - _IOW('X', 121, struct compat_xfs_fsop_setdm_handlereq) - -#ifdef BROKEN_X86_ALIGNMENT -/* on ia32 l_start is on a 32-bit boundary */ -typedef struct compat_xfs_flock64 { - __s16 l_type; - __s16 l_whence; - __s64 l_start __attribute__((packed)); - /* len == 0 means until end of file */ - __s64 l_len __attribute__((packed)); - __s32 l_sysid; - __u32 l_pid; - __s32 l_pad[4]; /* reserve area */ -} compat_xfs_flock64_t; - -#define XFS_IOC_ALLOCSP_32 _IOW('X', 10, struct compat_xfs_flock64) -#define XFS_IOC_FREESP_32 _IOW('X', 11, struct compat_xfs_flock64) -#define XFS_IOC_ALLOCSP64_32 _IOW('X', 36, struct compat_xfs_flock64) -#define XFS_IOC_FREESP64_32 _IOW('X', 37, struct compat_xfs_flock64) -#define XFS_IOC_RESVSP_32 _IOW('X', 40, struct compat_xfs_flock64) -#define XFS_IOC_UNRESVSP_32 _IOW('X', 41, struct compat_xfs_flock64) -#define XFS_IOC_RESVSP64_32 _IOW('X', 42, struct compat_xfs_flock64) -#define XFS_IOC_UNRESVSP64_32 _IOW('X', 43, struct compat_xfs_flock64) -#define XFS_IOC_ZERO_RANGE_32 _IOW('X', 57, struct compat_xfs_flock64) - -typedef struct compat_xfs_fsop_geom_v1 { - __u32 blocksize; /* filesystem (data) block size */ - __u32 rtextsize; /* realtime extent size */ - __u32 agblocks; /* fsblocks in an AG */ - __u32 agcount; /* number of allocation groups */ - __u32 logblocks; /* fsblocks in the log */ - __u32 sectsize; /* (data) sector size, bytes */ - __u32 inodesize; /* inode size in bytes */ - __u32 imaxpct; /* max allowed inode space(%) */ - __u64 datablocks; /* fsblocks in data subvolume */ - __u64 rtblocks; /* fsblocks in realtime subvol */ - __u64 rtextents; /* rt extents in realtime subvol*/ - __u64 logstart; /* starting fsblock of the log */ - unsigned char uuid[16]; /* unique id of the filesystem */ - __u32 sunit; /* stripe unit, fsblocks */ - __u32 swidth; /* stripe width, fsblocks */ - __s32 version; /* structure version */ - __u32 flags; /* superblock version flags */ - __u32 logsectsize; /* log sector size, bytes */ - __u32 rtsectsize; /* realtime sector size, bytes */ - __u32 dirblocksize; /* directory block size, bytes */ -} __attribute__((packed)) compat_xfs_fsop_geom_v1_t; - -#define XFS_IOC_FSGEOMETRY_V1_32 \ - _IOR('X', 100, struct compat_xfs_fsop_geom_v1) - -typedef struct compat_xfs_inogrp { - __u64 xi_startino; /* starting inode number */ - __s32 
xi_alloccount; /* # bits set in allocmask */ - __u64 xi_allocmask; /* mask of allocated inodes */ -} __attribute__((packed)) compat_xfs_inogrp_t; - -/* These growfs input structures have padding on the end, so must translate */ -typedef struct compat_xfs_growfs_data { - __u64 newblocks; /* new data subvol size, fsblocks */ - __u32 imaxpct; /* new inode space percentage limit */ -} __attribute__((packed)) compat_xfs_growfs_data_t; - -typedef struct compat_xfs_growfs_rt { - __u64 newblocks; /* new realtime size, fsblocks */ - __u32 extsize; /* new realtime extent size, fsblocks */ -} __attribute__((packed)) compat_xfs_growfs_rt_t; - -#define XFS_IOC_FSGROWFSDATA_32 _IOW('X', 110, struct compat_xfs_growfs_data) -#define XFS_IOC_FSGROWFSRT_32 _IOW('X', 112, struct compat_xfs_growfs_rt) - -#endif /* BROKEN_X86_ALIGNMENT */ - -#endif /* __XFS_IOCTL32_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c deleted file mode 100644 index b9c172b..0000000 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ /dev/null @@ -1,1210 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_acl.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_itable.h" -#include "xfs_rw.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_utils.h" -#include "xfs_vnodeops.h" -#include "xfs_inode_item.h" -#include "xfs_trace.h" - -#include <linux/capability.h> -#include <linux/xattr.h> -#include <linux/namei.h> -#include <linux/posix_acl.h> -#include <linux/security.h> -#include <linux/fiemap.h> -#include <linux/slab.h> - -/* - * Bring the timestamps in the XFS inode uptodate. - * - * Used before writing the inode to disk. - */ -void -xfs_synchronize_times( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; - ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; - ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec; - ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec; - ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec; - ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec; -} - -/* - * If the linux inode is valid, mark it dirty. 
- * Used when committing a dirty inode into a transaction so that - * the inode will get written back by the linux code - */ -void -xfs_mark_inode_dirty_sync( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) - mark_inode_dirty_sync(inode); -} - -void -xfs_mark_inode_dirty( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) - mark_inode_dirty(inode); -} - -/* - * Hook in SELinux. This is not quite correct yet, what we really need - * here (as we do for default ACLs) is a mechanism by which creation of - * these attrs can be journalled at inode creation time (along with the - * inode, of course, such that log replay can't cause these to be lost). - */ -STATIC int -xfs_init_security( - struct inode *inode, - struct inode *dir, - const struct qstr *qstr) -{ - struct xfs_inode *ip = XFS_I(inode); - size_t length; - void *value; - unsigned char *name; - int error; - - error = security_inode_init_security(inode, dir, qstr, (char **)&name, - &value, &length); - if (error) { - if (error == -EOPNOTSUPP) - return 0; - return -error; - } - - error = xfs_attr_set(ip, name, value, length, ATTR_SECURE); - - kfree(name); - kfree(value); - return error; -} - -static void -xfs_dentry_to_name( - struct xfs_name *namep, - struct dentry *dentry) -{ - namep->name = dentry->d_name.name; - namep->len = dentry->d_name.len; -} - -STATIC void -xfs_cleanup_inode( - struct inode *dir, - struct inode *inode, - struct dentry *dentry) -{ - struct xfs_name teardown; - - /* Oh, the horror. - * If we can't add the ACL or we fail in - * xfs_init_security we must back out. - * ENOSPC can hit here, among other things. - */ - xfs_dentry_to_name(&teardown, dentry); - - xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); - iput(inode); -} - -STATIC int -xfs_vn_mknod( - struct inode *dir, - struct dentry *dentry, - int mode, - dev_t rdev) -{ - struct inode *inode; - struct xfs_inode *ip = NULL; - struct posix_acl *default_acl = NULL; - struct xfs_name name; - int error; - - /* - * Irix uses Missed'em'V split, but doesn't want to see - * the upper 5 bits of (14bit) major. 
- */ - if (S_ISCHR(mode) || S_ISBLK(mode)) { - if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) - return -EINVAL; - rdev = sysv_encode_dev(rdev); - } else { - rdev = 0; - } - - if (IS_POSIXACL(dir)) { - default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); - if (IS_ERR(default_acl)) - return PTR_ERR(default_acl); - - if (!default_acl) - mode &= ~current_umask(); - } - - xfs_dentry_to_name(&name, dentry); - error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); - if (unlikely(error)) - goto out_free_acl; - - inode = VFS_I(ip); - - error = xfs_init_security(inode, dir, &dentry->d_name); - if (unlikely(error)) - goto out_cleanup_inode; - - if (default_acl) { - error = -xfs_inherit_acl(inode, default_acl); - default_acl = NULL; - if (unlikely(error)) - goto out_cleanup_inode; - } - - - d_instantiate(dentry, inode); - return -error; - - out_cleanup_inode: - xfs_cleanup_inode(dir, inode, dentry); - out_free_acl: - posix_acl_release(default_acl); - return -error; -} - -STATIC int -xfs_vn_create( - struct inode *dir, - struct dentry *dentry, - int mode, - struct nameidata *nd) -{ - return xfs_vn_mknod(dir, dentry, mode, 0); -} - -STATIC int -xfs_vn_mkdir( - struct inode *dir, - struct dentry *dentry, - int mode) -{ - return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); -} - -STATIC struct dentry * -xfs_vn_lookup( - struct inode *dir, - struct dentry *dentry, - struct nameidata *nd) -{ - struct xfs_inode *cip; - struct xfs_name name; - int error; - - if (dentry->d_name.len >= MAXNAMELEN) - return ERR_PTR(-ENAMETOOLONG); - - xfs_dentry_to_name(&name, dentry); - error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); - if (unlikely(error)) { - if (unlikely(error != ENOENT)) - return ERR_PTR(-error); - d_add(dentry, NULL); - return NULL; - } - - return d_splice_alias(VFS_I(cip), dentry); -} - -STATIC struct dentry * -xfs_vn_ci_lookup( - struct inode *dir, - struct dentry *dentry, - struct nameidata *nd) -{ - struct xfs_inode *ip; - struct xfs_name xname; - struct xfs_name ci_name; - struct qstr dname; - int error; - - if (dentry->d_name.len >= MAXNAMELEN) - return ERR_PTR(-ENAMETOOLONG); - - xfs_dentry_to_name(&xname, dentry); - error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); - if (unlikely(error)) { - if (unlikely(error != ENOENT)) - return ERR_PTR(-error); - /* - * call d_add(dentry, NULL) here when d_drop_negative_children - * is called in xfs_vn_mknod (ie. allow negative dentries - * with CI filesystems). - */ - return NULL; - } - - /* if exact match, just splice and exit */ - if (!ci_name.name) - return d_splice_alias(VFS_I(ip), dentry); - - /* else case-insensitive match... */ - dname.name = ci_name.name; - dname.len = ci_name.len; - dentry = d_add_ci(dentry, VFS_I(ip), &dname); - kmem_free(ci_name.name); - return dentry; -} - -STATIC int -xfs_vn_link( - struct dentry *old_dentry, - struct inode *dir, - struct dentry *dentry) -{ - struct inode *inode = old_dentry->d_inode; - struct xfs_name name; - int error; - - xfs_dentry_to_name(&name, dentry); - - error = xfs_link(XFS_I(dir), XFS_I(inode), &name); - if (unlikely(error)) - return -error; - - ihold(inode); - d_instantiate(dentry, inode); - return 0; -} - -STATIC int -xfs_vn_unlink( - struct inode *dir, - struct dentry *dentry) -{ - struct xfs_name name; - int error; - - xfs_dentry_to_name(&name, dentry); - - error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); - if (error) - return error; - - /* - * With unlink, the VFS makes the dentry "negative": no inode, - * but still hashed. 
This is incompatible with case-insensitive - * mode, so invalidate (unhash) the dentry in CI-mode. - */ - if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb)) - d_invalidate(dentry); - return 0; -} - -STATIC int -xfs_vn_symlink( - struct inode *dir, - struct dentry *dentry, - const char *symname) -{ - struct inode *inode; - struct xfs_inode *cip = NULL; - struct xfs_name name; - int error; - mode_t mode; - - mode = S_IFLNK | - (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); - xfs_dentry_to_name(&name, dentry); - - error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); - if (unlikely(error)) - goto out; - - inode = VFS_I(cip); - - error = xfs_init_security(inode, dir, &dentry->d_name); - if (unlikely(error)) - goto out_cleanup_inode; - - d_instantiate(dentry, inode); - return 0; - - out_cleanup_inode: - xfs_cleanup_inode(dir, inode, dentry); - out: - return -error; -} - -STATIC int -xfs_vn_rename( - struct inode *odir, - struct dentry *odentry, - struct inode *ndir, - struct dentry *ndentry) -{ - struct inode *new_inode = ndentry->d_inode; - struct xfs_name oname; - struct xfs_name nname; - - xfs_dentry_to_name(&oname, odentry); - xfs_dentry_to_name(&nname, ndentry); - - return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), - XFS_I(ndir), &nname, new_inode ? - XFS_I(new_inode) : NULL); -} - -/* - * careful here - this function can get called recursively, so - * we need to be very careful about how much stack we use. - * uio is kmalloced for this reason... - */ -STATIC void * -xfs_vn_follow_link( - struct dentry *dentry, - struct nameidata *nd) -{ - char *link; - int error = -ENOMEM; - - link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); - if (!link) - goto out_err; - - error = -xfs_readlink(XFS_I(dentry->d_inode), link); - if (unlikely(error)) - goto out_kfree; - - nd_set_link(nd, link); - return NULL; - - out_kfree: - kfree(link); - out_err: - nd_set_link(nd, ERR_PTR(error)); - return NULL; -} - -STATIC void -xfs_vn_put_link( - struct dentry *dentry, - struct nameidata *nd, - void *p) -{ - char *s = nd_get_link(nd); - - if (!IS_ERR(s)) - kfree(s); -} - -STATIC int -xfs_vn_getattr( - struct vfsmount *mnt, - struct dentry *dentry, - struct kstat *stat) -{ - struct inode *inode = dentry->d_inode; - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - - trace_xfs_getattr(ip); - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - stat->size = XFS_ISIZE(ip); - stat->dev = inode->i_sb->s_dev; - stat->mode = ip->i_d.di_mode; - stat->nlink = ip->i_d.di_nlink; - stat->uid = ip->i_d.di_uid; - stat->gid = ip->i_d.di_gid; - stat->ino = ip->i_ino; - stat->atime = inode->i_atime; - stat->mtime = inode->i_mtime; - stat->ctime = inode->i_ctime; - stat->blocks = - XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); - - - switch (inode->i_mode & S_IFMT) { - case S_IFBLK: - case S_IFCHR: - stat->blksize = BLKDEV_IOSIZE; - stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, - sysv_minor(ip->i_df.if_u2.if_rdev)); - break; - default: - if (XFS_IS_REALTIME_INODE(ip)) { - /* - * If the file blocks are being allocated from a - * realtime volume, then return the inode's realtime - * extent size or the realtime volume's extent size. 
- */ - stat->blksize = - xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; - } else - stat->blksize = xfs_preferred_iosize(mp); - stat->rdev = 0; - break; - } - - return 0; -} - -int -xfs_setattr_nonsize( - struct xfs_inode *ip, - struct iattr *iattr, - int flags) -{ - xfs_mount_t *mp = ip->i_mount; - struct inode *inode = VFS_I(ip); - int mask = iattr->ia_valid; - xfs_trans_t *tp; - int error; - uid_t uid = 0, iuid = 0; - gid_t gid = 0, igid = 0; - struct xfs_dquot *udqp = NULL, *gdqp = NULL; - struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL; - - trace_xfs_setattr(ip); - - if (mp->m_flags & XFS_MOUNT_RDONLY) - return XFS_ERROR(EROFS); - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - error = -inode_change_ok(inode, iattr); - if (error) - return XFS_ERROR(error); - - ASSERT((mask & ATTR_SIZE) == 0); - - /* - * If disk quotas is on, we make sure that the dquots do exist on disk, - * before we start any other transactions. Trying to do this later - * is messy. We don't care to take a readlock to look at the ids - * in inode here, because we can't hold it across the trans_reserve. - * If the IDs do change before we take the ilock, we're covered - * because the i_*dquot fields will get updated anyway. - */ - if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { - uint qflags = 0; - - if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { - uid = iattr->ia_uid; - qflags |= XFS_QMOPT_UQUOTA; - } else { - uid = ip->i_d.di_uid; - } - if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { - gid = iattr->ia_gid; - qflags |= XFS_QMOPT_GQUOTA; - } else { - gid = ip->i_d.di_gid; - } - - /* - * We take a reference when we initialize udqp and gdqp, - * so it is important that we never blindly double trip on - * the same variable. See xfs_create() for an example. - */ - ASSERT(udqp == NULL); - ASSERT(gdqp == NULL); - error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), - qflags, &udqp, &gdqp); - if (error) - return error; - } - - tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); - error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); - if (error) - goto out_dqrele; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - - /* - * Change file ownership. Must be the owner or privileged. - */ - if (mask & (ATTR_UID|ATTR_GID)) { - /* - * These IDs could have changed since we last looked at them. - * But, we're assured that if the ownership did change - * while we didn't have the inode locked, inode's dquot(s) - * would have changed also. - */ - iuid = ip->i_d.di_uid; - igid = ip->i_d.di_gid; - gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; - uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; - - /* - * Do a quota reservation only if uid/gid is actually - * going to change. - */ - if (XFS_IS_QUOTA_RUNNING(mp) && - ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || - (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { - ASSERT(tp); - error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, - capable(CAP_FOWNER) ? - XFS_QMOPT_FORCE_RES : 0); - if (error) /* out of quota */ - goto out_trans_cancel; - } - } - - xfs_trans_ijoin(tp, ip); - - /* - * Change file ownership. Must be the owner or privileged. - */ - if (mask & (ATTR_UID|ATTR_GID)) { - /* - * CAP_FSETID overrides the following restrictions: - * - * The set-user-ID and set-group-ID bits of a file will be - * cleared upon successful return from chown() - */ - if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && - !capable(CAP_FSETID)) - ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); - - /* - * Change the ownerships and register quota modifications - * in the transaction. 
- */ - if (iuid != uid) { - if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { - ASSERT(mask & ATTR_UID); - ASSERT(udqp); - olddquot1 = xfs_qm_vop_chown(tp, ip, - &ip->i_udquot, udqp); - } - ip->i_d.di_uid = uid; - inode->i_uid = uid; - } - if (igid != gid) { - if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { - ASSERT(!XFS_IS_PQUOTA_ON(mp)); - ASSERT(mask & ATTR_GID); - ASSERT(gdqp); - olddquot2 = xfs_qm_vop_chown(tp, ip, - &ip->i_gdquot, gdqp); - } - ip->i_d.di_gid = gid; - inode->i_gid = gid; - } - } - - /* - * Change file access modes. - */ - if (mask & ATTR_MODE) { - umode_t mode = iattr->ia_mode; - - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) - mode &= ~S_ISGID; - - ip->i_d.di_mode &= S_IFMT; - ip->i_d.di_mode |= mode & ~S_IFMT; - - inode->i_mode &= S_IFMT; - inode->i_mode |= mode & ~S_IFMT; - } - - /* - * Change file access or modified times. - */ - if (mask & ATTR_ATIME) { - inode->i_atime = iattr->ia_atime; - ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; - ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; - ip->i_update_core = 1; - } - if (mask & ATTR_CTIME) { - inode->i_ctime = iattr->ia_ctime; - ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; - ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; - ip->i_update_core = 1; - } - if (mask & ATTR_MTIME) { - inode->i_mtime = iattr->ia_mtime; - ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; - ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; - ip->i_update_core = 1; - } - - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - - XFS_STATS_INC(xs_ig_attrchg); - - if (mp->m_flags & XFS_MOUNT_WSYNC) - xfs_trans_set_sync(tp); - error = xfs_trans_commit(tp, 0); - - xfs_iunlock(ip, XFS_ILOCK_EXCL); - - /* - * Release any dquot(s) the inode had kept before chown. - */ - xfs_qm_dqrele(olddquot1); - xfs_qm_dqrele(olddquot2); - xfs_qm_dqrele(udqp); - xfs_qm_dqrele(gdqp); - - if (error) - return XFS_ERROR(error); - - /* - * XXX(hch): Updating the ACL entries is not atomic vs the i_mode - * update. We could avoid this with linked transactions - * and passing down the transaction pointer all the way - * to attr_set. No previous user of the generic - * Posix ACL code seems to care about this issue either. - */ - if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { - error = -xfs_acl_chmod(inode); - if (error) - return XFS_ERROR(error); - } - - return 0; - -out_trans_cancel: - xfs_trans_cancel(tp, 0); - xfs_iunlock(ip, XFS_ILOCK_EXCL); -out_dqrele: - xfs_qm_dqrele(udqp); - xfs_qm_dqrele(gdqp); - return error; -} - -/* - * Truncate file. Must have write permission and not be a directory. - */ -int -xfs_setattr_size( - struct xfs_inode *ip, - struct iattr *iattr, - int flags) -{ - struct xfs_mount *mp = ip->i_mount; - struct inode *inode = VFS_I(ip); - int mask = iattr->ia_valid; - struct xfs_trans *tp; - int error; - uint lock_flags; - uint commit_flags = 0; - - trace_xfs_setattr(ip); - - if (mp->m_flags & XFS_MOUNT_RDONLY) - return XFS_ERROR(EROFS); - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - error = -inode_change_ok(inode, iattr); - if (error) - return XFS_ERROR(error); - - ASSERT(S_ISREG(ip->i_d.di_mode)); - ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| - ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| - ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); - - lock_flags = XFS_ILOCK_EXCL; - if (!(flags & XFS_ATTR_NOLOCK)) - lock_flags |= XFS_IOLOCK_EXCL; - xfs_ilock(ip, lock_flags); - - /* - * Short circuit the truncate case for zero length files. 
- */ - if (iattr->ia_size == 0 && - ip->i_size == 0 && ip->i_d.di_nextents == 0) { - if (!(mask & (ATTR_CTIME|ATTR_MTIME))) - goto out_unlock; - - /* - * Use the regular setattr path to update the timestamps. - */ - xfs_iunlock(ip, lock_flags); - iattr->ia_valid &= ~ATTR_SIZE; - return xfs_setattr_nonsize(ip, iattr, 0); - } - - /* - * Make sure that the dquots are attached to the inode. - */ - error = xfs_qm_dqattach_locked(ip, 0); - if (error) - goto out_unlock; - - /* - * Now we can make the changes. Before we join the inode to the - * transaction, take care of the part of the truncation that must be - * done without the inode lock. This needs to be done before joining - * the inode to the transaction, because the inode cannot be unlocked - * once it is a part of the transaction. - */ - if (iattr->ia_size > ip->i_size) { - /* - * Do the first part of growing a file: zero any data in the - * last block that is beyond the old EOF. We need to do this - * before the inode is joined to the transaction to modify - * i_size. - */ - error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size); - if (error) - goto out_unlock; - } - xfs_iunlock(ip, XFS_ILOCK_EXCL); - lock_flags &= ~XFS_ILOCK_EXCL; - - /* - * We are going to log the inode size change in this transaction so - * any previous writes that are beyond the on disk EOF and the new - * EOF that have not been written out need to be written here. If we - * do not write the data out, we expose ourselves to the null files - * problem. - * - * Only flush from the on disk size to the smaller of the in memory - * file size or the new size as that's the range we really care about - * here and prevents waiting for other data not within the range we - * care about here. - */ - if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) { - error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, - XBF_ASYNC, FI_NONE); - if (error) - goto out_unlock; - } - - /* - * Wait for all I/O to complete. - */ - xfs_ioend_wait(ip); - - error = -block_truncate_page(inode->i_mapping, iattr->ia_size, - xfs_get_blocks); - if (error) - goto out_unlock; - - tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); - error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, - XFS_TRANS_PERM_LOG_RES, - XFS_ITRUNCATE_LOG_COUNT); - if (error) - goto out_trans_cancel; - - truncate_setsize(inode, iattr->ia_size); - - commit_flags = XFS_TRANS_RELEASE_LOG_RES; - lock_flags |= XFS_ILOCK_EXCL; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - - xfs_trans_ijoin(tp, ip); - - /* - * Only change the c/mtime if we are changing the size or we are - * explicitly asked to change it. This handles the semantic difference - * between truncate() and ftruncate() as implemented in the VFS. - * - * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a - * special case where we need to update the times despite not having - * these flags set. For all other operations the VFS set these flags - * explicitly if it wants a timestamp update. 
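The truncate()/ftruncate() distinction described above can be checked directly: even though the VFS passes no ATTR_CTIME|ATTR_MTIME for truncate(2), a size change must still move the timestamps. A minimal sketch, assuming a hypothetical scratch path:

/* truncate(2) must still bump c/mtime when the size changes, even
 * though the VFS does not pass ATTR_CTIME|ATTR_MTIME for it. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/tmp/trunc-demo";   /* hypothetical scratch file */
        struct stat st;

        int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
        if (fd < 0 || write(fd, "hello", 5) != 5)
                return 1;
        fstat(fd, &st);
        printf("mtime before: %ld (size %lld)\n",
               (long)st.st_mtime, (long long)st.st_size);

        sleep(1);                       /* make a 1s timestamp step visible */
        if (truncate(path, 0) < 0)      /* size change: times must move */
                return 1;

        fstat(fd, &st);
        printf("mtime after:  %ld (size %lld)\n",
               (long)st.st_mtime, (long long)st.st_size);
        close(fd);
        return 0;
}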
- */ - if (iattr->ia_size != ip->i_size && - (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { - iattr->ia_ctime = iattr->ia_mtime = - current_fs_time(inode->i_sb); - mask |= ATTR_CTIME | ATTR_MTIME; - } - - if (iattr->ia_size > ip->i_size) { - ip->i_d.di_size = iattr->ia_size; - ip->i_size = iattr->ia_size; - } else if (iattr->ia_size <= ip->i_size || - (iattr->ia_size == 0 && ip->i_d.di_nextents)) { - error = xfs_itruncate_data(&tp, ip, iattr->ia_size); - if (error) - goto out_trans_abort; - - /* - * Truncated "down", so we're removing references to old data - * here - if we delay flushing for a long time, we expose - * ourselves unduly to the notorious NULL files problem. So, - * we mark this inode and flush it when the file is closed, - * and do not wait the usual (long) time for writeout. - */ - xfs_iflags_set(ip, XFS_ITRUNCATED); - } - - if (mask & ATTR_CTIME) { - inode->i_ctime = iattr->ia_ctime; - ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; - ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; - ip->i_update_core = 1; - } - if (mask & ATTR_MTIME) { - inode->i_mtime = iattr->ia_mtime; - ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; - ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; - ip->i_update_core = 1; - } - - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - - XFS_STATS_INC(xs_ig_attrchg); - - if (mp->m_flags & XFS_MOUNT_WSYNC) - xfs_trans_set_sync(tp); - - error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); -out_unlock: - if (lock_flags) - xfs_iunlock(ip, lock_flags); - return error; - -out_trans_abort: - commit_flags |= XFS_TRANS_ABORT; -out_trans_cancel: - xfs_trans_cancel(tp, commit_flags); - goto out_unlock; -} - -STATIC int -xfs_vn_setattr( - struct dentry *dentry, - struct iattr *iattr) -{ - if (iattr->ia_valid & ATTR_SIZE) - return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0); - return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); -} - -#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) - -/* - * Call fiemap helper to fill in user data. - * Returns positive errors to xfs_getbmap. - */ -STATIC int -xfs_fiemap_format( - void **arg, - struct getbmapx *bmv, - int *full) -{ - int error; - struct fiemap_extent_info *fieinfo = *arg; - u32 fiemap_flags = 0; - u64 logical, physical, length; - - /* Do nothing for a hole */ - if (bmv->bmv_block == -1LL) - return 0; - - logical = BBTOB(bmv->bmv_offset); - physical = BBTOB(bmv->bmv_block); - length = BBTOB(bmv->bmv_length); - - if (bmv->bmv_oflags & BMV_OF_PREALLOC) - fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; - else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { - fiemap_flags |= FIEMAP_EXTENT_DELALLOC; - physical = 0; /* no block yet */ - } - if (bmv->bmv_oflags & BMV_OF_LAST) - fiemap_flags |= FIEMAP_EXTENT_LAST; - - error = fiemap_fill_next_extent(fieinfo, logical, physical, - length, fiemap_flags); - if (error > 0) { - error = 0; - *full = 1; /* user array now full */ - } - - return -error; -} - -STATIC int -xfs_vn_fiemap( - struct inode *inode, - struct fiemap_extent_info *fieinfo, - u64 start, - u64 length) -{ - xfs_inode_t *ip = XFS_I(inode); - struct getbmapx bm; - int error; - - error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); - if (error) - return error; - - /* Set up bmap header for xfs internal routine */ - bm.bmv_offset = BTOBB(start); - /* Special case for whole file */ - if (length == FIEMAP_MAX_OFFSET) - bm.bmv_length = -1LL; - else - bm.bmv_length = BTOBB(length); - - /* We add one because in getbmap world count includes the header */ - bm.bmv_count = !fieinfo->fi_extents_max ? 
MAXEXTNUM : - fieinfo->fi_extents_max + 1; - bm.bmv_count = min_t(__s32, bm.bmv_count, - (PAGE_SIZE * 16 / sizeof(struct getbmapx))); - bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; - if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) - bm.bmv_iflags |= BMV_IF_ATTRFORK; - if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) - bm.bmv_iflags |= BMV_IF_DELALLOC; - - error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); - if (error) - return -error; - - return 0; -} - -static const struct inode_operations xfs_inode_operations = { - .get_acl = xfs_get_acl, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = generic_removexattr, - .listxattr = xfs_vn_listxattr, - .fiemap = xfs_vn_fiemap, -}; - -static const struct inode_operations xfs_dir_inode_operations = { - .create = xfs_vn_create, - .lookup = xfs_vn_lookup, - .link = xfs_vn_link, - .unlink = xfs_vn_unlink, - .symlink = xfs_vn_symlink, - .mkdir = xfs_vn_mkdir, - /* - * Yes, XFS uses the same method for rmdir and unlink. - * - * There are some subtile differences deeper in the code, - * but we use S_ISDIR to check for those. - */ - .rmdir = xfs_vn_unlink, - .mknod = xfs_vn_mknod, - .rename = xfs_vn_rename, - .get_acl = xfs_get_acl, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = generic_removexattr, - .listxattr = xfs_vn_listxattr, -}; - -static const struct inode_operations xfs_dir_ci_inode_operations = { - .create = xfs_vn_create, - .lookup = xfs_vn_ci_lookup, - .link = xfs_vn_link, - .unlink = xfs_vn_unlink, - .symlink = xfs_vn_symlink, - .mkdir = xfs_vn_mkdir, - /* - * Yes, XFS uses the same method for rmdir and unlink. - * - * There are some subtile differences deeper in the code, - * but we use S_ISDIR to check for those. - */ - .rmdir = xfs_vn_unlink, - .mknod = xfs_vn_mknod, - .rename = xfs_vn_rename, - .get_acl = xfs_get_acl, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = generic_removexattr, - .listxattr = xfs_vn_listxattr, -}; - -static const struct inode_operations xfs_symlink_inode_operations = { - .readlink = generic_readlink, - .follow_link = xfs_vn_follow_link, - .put_link = xfs_vn_put_link, - .get_acl = xfs_get_acl, - .getattr = xfs_vn_getattr, - .setattr = xfs_vn_setattr, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = generic_removexattr, - .listxattr = xfs_vn_listxattr, -}; - -STATIC void -xfs_diflags_to_iflags( - struct inode *inode, - struct xfs_inode *ip) -{ - if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) - inode->i_flags |= S_IMMUTABLE; - else - inode->i_flags &= ~S_IMMUTABLE; - if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) - inode->i_flags |= S_APPEND; - else - inode->i_flags &= ~S_APPEND; - if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) - inode->i_flags |= S_SYNC; - else - inode->i_flags &= ~S_SYNC; - if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) - inode->i_flags |= S_NOATIME; - else - inode->i_flags &= ~S_NOATIME; -} - -/* - * Initialize the Linux inode, set up the operation vectors and - * unlock the inode. - * - * When reading existing inodes from disk this is called directly - * from xfs_iget, when creating a new inode it is called from - * xfs_ialloc after setting up the inode. - * - * We are always called with an uninitialised linux inode here. - * We need to initialise the necessary fields and take a reference - * on it. 
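The S_IMMUTABLE/S_APPEND/S_SYNC/S_NOATIME bits that xfs_diflags_to_iflags() sets above are the in-core counterparts of the FS_*_FL flags userspace sees through the FS_IOC_GETFLAGS ioctl (the interface behind chattr/lsattr). A minimal reader sketch; the file argument defaults to the current directory:

/* Read the generic inode flags that xfs_diflags_to_iflags() mirrors. */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(int argc, char **argv)
{
        long flags = 0;
        int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY);

        if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                return 1;
        printf("immutable=%d append=%d sync=%d noatime=%d\n",
               !!(flags & FS_IMMUTABLE_FL), !!(flags & FS_APPEND_FL),
               !!(flags & FS_SYNC_FL), !!(flags & FS_NOATIME_FL));
        return 0;
}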
- */ -void -xfs_setup_inode( - struct xfs_inode *ip) -{ - struct inode *inode = &ip->i_vnode; - - inode->i_ino = ip->i_ino; - inode->i_state = I_NEW; - - inode_sb_list_add(inode); - /* make the inode look hashed for the writeback code */ - hlist_add_fake(&inode->i_hash); - - inode->i_mode = ip->i_d.di_mode; - inode->i_nlink = ip->i_d.di_nlink; - inode->i_uid = ip->i_d.di_uid; - inode->i_gid = ip->i_d.di_gid; - - switch (inode->i_mode & S_IFMT) { - case S_IFBLK: - case S_IFCHR: - inode->i_rdev = - MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, - sysv_minor(ip->i_df.if_u2.if_rdev)); - break; - default: - inode->i_rdev = 0; - break; - } - - inode->i_generation = ip->i_d.di_gen; - i_size_write(inode, ip->i_d.di_size); - inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; - inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; - inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; - inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; - inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; - inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; - xfs_diflags_to_iflags(inode, ip); - - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - inode->i_op = &xfs_inode_operations; - inode->i_fop = &xfs_file_operations; - inode->i_mapping->a_ops = &xfs_address_space_operations; - break; - case S_IFDIR: - if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) - inode->i_op = &xfs_dir_ci_inode_operations; - else - inode->i_op = &xfs_dir_inode_operations; - inode->i_fop = &xfs_dir_file_operations; - break; - case S_IFLNK: - inode->i_op = &xfs_symlink_inode_operations; - if (!(ip->i_df.if_flags & XFS_IFINLINE)) - inode->i_mapping->a_ops = &xfs_address_space_operations; - break; - default: - inode->i_op = &xfs_inode_operations; - init_special_inode(inode, inode->i_mode, inode->i_rdev); - break; - } - - /* - * If there is no attribute fork no ACL can exist on this inode, - * and it can't have any file capabilities attached to it either. - */ - if (!XFS_IFORK_Q(ip)) { - inode_has_no_xattr(inode); - cache_no_acl(inode); - } - - xfs_iflags_clear(ip, XFS_INEW); - barrier(); - - unlock_new_inode(inode); -} diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h deleted file mode 100644 index ef41c92..0000000 --- a/fs/xfs/linux-2.6/xfs_iops.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_IOPS_H__ -#define __XFS_IOPS_H__ - -struct xfs_inode; - -extern const struct file_operations xfs_file_operations; -extern const struct file_operations xfs_dir_file_operations; - -extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); - -extern void xfs_setup_inode(struct xfs_inode *); - -#endif /* __XFS_IOPS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h deleted file mode 100644 index 1e8a45e..0000000 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_LINUX__ -#define __XFS_LINUX__ - -#include - -/* - * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits. - * XFS_BIG_INUMS requires XFS_BIG_BLKNOS to be set. - */ -#if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) -# define XFS_BIG_BLKNOS 1 -# define XFS_BIG_INUMS 1 -#else -# define XFS_BIG_BLKNOS 0 -# define XFS_BIG_INUMS 0 -#endif - -#include "xfs_types.h" - -#include "kmem.h" -#include "mrlock.h" -#include "time.h" -#include "uuid.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "xfs_vnode.h" -#include "xfs_stats.h" -#include "xfs_sysctl.h" -#include "xfs_iops.h" -#include "xfs_aops.h" -#include "xfs_super.h" -#include "xfs_buf.h" -#include "xfs_message.h" - -#ifdef __BIG_ENDIAN -#define XFS_NATIVE_HOST 1 -#else -#undef XFS_NATIVE_HOST -#endif - -/* - * Feature macros (disable/enable) - */ -#ifdef CONFIG_SMP -#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ -#else -#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ -#endif - -#define irix_sgid_inherit xfs_params.sgid_inherit.val -#define irix_symlink_mode xfs_params.symlink_mode.val -#define xfs_panic_mask xfs_params.panic_mask.val -#define xfs_error_level xfs_params.error_level.val -#define xfs_syncd_centisecs xfs_params.syncd_timer.val -#define xfs_stats_clear xfs_params.stats_clear.val -#define xfs_inherit_sync xfs_params.inherit_sync.val -#define xfs_inherit_nodump xfs_params.inherit_nodump.val -#define xfs_inherit_noatime xfs_params.inherit_noatim.val -#define xfs_buf_timer_centisecs xfs_params.xfs_buf_timer.val -#define xfs_buf_age_centisecs xfs_params.xfs_buf_age.val -#define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val -#define xfs_rotorstep 
xfs_params.rotorstep.val -#define xfs_inherit_nodefrag xfs_params.inherit_nodfrg.val -#define xfs_fstrm_centisecs xfs_params.fstrm_timer.val - -#define current_cpu() (raw_smp_processor_id()) -#define current_pid() (current->pid) -#define current_test_flags(f) (current->flags & (f)) -#define current_set_flags_nested(sp, f) \ - (*(sp) = current->flags, current->flags |= (f)) -#define current_clear_flags_nested(sp, f) \ - (*(sp) = current->flags, current->flags &= ~(f)) -#define current_restore_flags_nested(sp, f) \ - (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) - -#define spinlock_destroy(lock) - -#define NBBY 8 /* number of bits per byte */ - -/* - * Size of block device i/o is parameterized here. - * Currently the system supports page-sized i/o. - */ -#define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT -#define BLKDEV_IOSIZE (1<> 32; - __low = c; - if (__high) { - __upper = __high % (b); - __high = __high / (b); - } - asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); - asm("":"=A" (c):"a" (__low),"d" (__high)); - *(__u64 *)a = c; - return __mod; - } - } - - /* NOTREACHED */ - return 0; -} - -/* Side effect free 64 bit mod operation */ -static inline __u32 xfs_do_mod(void *a, __u32 b, int n) -{ - switch (n) { - case 4: - return *(__u32 *)a % b; - case 8: - { - unsigned long __upper, __low, __high, __mod; - __u64 c = *(__u64 *)a; - __upper = __high = c >> 32; - __low = c; - if (__high) { - __upper = __high % (b); - __high = __high / (b); - } - asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper)); - asm("":"=A" (c):"a" (__low),"d" (__high)); - return __mod; - } - } - - /* NOTREACHED */ - return 0; -} -#else -static inline __u32 xfs_do_div(void *a, __u32 b, int n) -{ - __u32 mod; - - switch (n) { - case 4: - mod = *(__u32 *)a % b; - *(__u32 *)a = *(__u32 *)a / b; - return mod; - case 8: - mod = do_div(*(__u64 *)a, b); - return mod; - } - - /* NOTREACHED */ - return 0; -} - -/* Side effect free 64 bit mod operation */ -static inline __u32 xfs_do_mod(void *a, __u32 b, int n) -{ - switch (n) { - case 4: - return *(__u32 *)a % b; - case 8: - { - __u64 c = *(__u64 *)a; - return do_div(c, b); - } - } - - /* NOTREACHED */ - return 0; -} -#endif - -#undef do_div -#define do_div(a, b) xfs_do_div(&(a), (b), sizeof(a)) -#define do_mod(a, b) xfs_do_mod(&(a), (b), sizeof(a)) - -static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y) -{ - x += y - 1; - do_div(x, y); - return(x * y); -} - -static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y) -{ - x += y - 1; - do_div(x, y); - return x; -} - -/* ARM old ABI has some weird alignment/padding */ -#if defined(__arm__) && !defined(__ARM_EABI__) -#define __arch_pack __attribute__((packed)) -#else -#define __arch_pack -#endif - -#define ASSERT_ALWAYS(expr) \ - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) - -#ifndef DEBUG -#define ASSERT(expr) ((void)0) - -#ifndef STATIC -# define STATIC static noinline -#endif - -#else /* DEBUG */ - -#define ASSERT(expr) \ - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) - -#ifndef STATIC -# define STATIC noinline -#endif - -#endif /* DEBUG */ - -#endif /* __XFS_LINUX__ */ diff --git a/fs/xfs/linux-2.6/xfs_message.c b/fs/xfs/linux-2.6/xfs_message.c deleted file mode 100644 index bd672de..0000000 --- a/fs/xfs/linux-2.6/xfs_message.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2011 Red Hat, Inc. All Rights Reserved. 
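The roundup_64()/howmany_64() helpers earlier in xfs_linux.h route through do_div() because 64-by-32 division needs a helper on 32-bit kernels; in userspace plain C division expresses the same arithmetic. A minimal equivalent sketch:

/* Userspace equivalents of roundup_64()/howmany_64(). */
#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_64(uint64_t x, uint32_t y)
{
        return ((x + y - 1) / y) * y;
}

static uint64_t howmany_64(uint64_t x, uint32_t y)
{
        return (x + y - 1) / y;
}

int main(void)
{
        /* e.g. rounding a byte count up to 4k filesystem blocks */
        printf("%llu %llu\n",
               (unsigned long long)roundup_64(10000, 4096),
               (unsigned long long)howmany_64(10000, 4096));   /* 12288 3 */
        return 0;
}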
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_types.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_mount.h" - -/* - * XFS logging functions - */ -static void -__xfs_printk( - const char *level, - const struct xfs_mount *mp, - struct va_format *vaf) -{ - if (mp && mp->m_fsname) { - printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); - return; - } - printk("%sXFS: %pV\n", level, vaf); -} - -#define define_xfs_printk_level(func, kern_level) \ -void func(const struct xfs_mount *mp, const char *fmt, ...) \ -{ \ - struct va_format vaf; \ - va_list args; \ - \ - va_start(args, fmt); \ - \ - vaf.fmt = fmt; \ - vaf.va = &args; \ - \ - __xfs_printk(kern_level, mp, &vaf); \ - va_end(args); \ -} \ - -define_xfs_printk_level(xfs_emerg, KERN_EMERG); -define_xfs_printk_level(xfs_alert, KERN_ALERT); -define_xfs_printk_level(xfs_crit, KERN_CRIT); -define_xfs_printk_level(xfs_err, KERN_ERR); -define_xfs_printk_level(xfs_warn, KERN_WARNING); -define_xfs_printk_level(xfs_notice, KERN_NOTICE); -define_xfs_printk_level(xfs_info, KERN_INFO); -#ifdef DEBUG -define_xfs_printk_level(xfs_debug, KERN_DEBUG); -#endif - -void -xfs_alert_tag( - const struct xfs_mount *mp, - int panic_tag, - const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - int do_panic = 0; - - if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { - xfs_alert(mp, "Transforming an alert into a BUG."); - do_panic = 1; - } - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - - __xfs_printk(KERN_ALERT, mp, &vaf); - va_end(args); - - BUG_ON(do_panic); -} - -void -assfail(char *expr, char *file, int line) -{ - xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d", - expr, file, line); - BUG(); -} - -void -xfs_hex_dump(void *p, int length) -{ - print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); -} diff --git a/fs/xfs/linux-2.6/xfs_message.h b/fs/xfs/linux-2.6/xfs_message.h deleted file mode 100644 index 7fb7ea0..0000000 --- a/fs/xfs/linux-2.6/xfs_message.h +++ /dev/null @@ -1,39 +0,0 @@ -#ifndef __XFS_MESSAGE_H -#define __XFS_MESSAGE_H 1 - -struct xfs_mount; - -extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void xfs_alert_tag(const struct xfs_mount *mp, int tag, - const char *fmt, ...) - __attribute__ ((format (printf, 3, 4))); -extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...) 
- __attribute__ ((format (printf, 2, 3))); -extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); - -#ifdef DEBUG -extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) - __attribute__ ((format (printf, 2, 3))); -#else -static inline void -__attribute__ ((format (printf, 2, 3))) -xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) -{ -} -#endif - -extern void assfail(char *expr, char *f, int l); - -extern void xfs_hex_dump(void *p, int length); - -#endif /* __XFS_MESSAGE_H */ diff --git a/fs/xfs/linux-2.6/xfs_quotaops.c b/fs/xfs/linux-2.6/xfs_quotaops.c deleted file mode 100644 index 7e76f53..0000000 --- a/fs/xfs/linux-2.6/xfs_quotaops.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (c) 2008, Christoph Hellwig - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_sb.h" -#include "xfs_inum.h" -#include "xfs_log.h" -#include "xfs_ag.h" -#include "xfs_mount.h" -#include "xfs_quota.h" -#include "xfs_trans.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_qm.h" -#include - - -STATIC int -xfs_quota_type(int type) -{ - switch (type) { - case USRQUOTA: - return XFS_DQ_USER; - case GRPQUOTA: - return XFS_DQ_GROUP; - default: - return XFS_DQ_PROJ; - } -} - -STATIC int -xfs_fs_get_xstate( - struct super_block *sb, - struct fs_quota_stat *fqs) -{ - struct xfs_mount *mp = XFS_M(sb); - - if (!XFS_IS_QUOTA_RUNNING(mp)) - return -ENOSYS; - return -xfs_qm_scall_getqstat(mp, fqs); -} - -STATIC int -xfs_fs_set_xstate( - struct super_block *sb, - unsigned int uflags, - int op) -{ - struct xfs_mount *mp = XFS_M(sb); - unsigned int flags = 0; - - if (sb->s_flags & MS_RDONLY) - return -EROFS; - if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp)) - return -ENOSYS; - - if (uflags & FS_QUOTA_UDQ_ACCT) - flags |= XFS_UQUOTA_ACCT; - if (uflags & FS_QUOTA_PDQ_ACCT) - flags |= XFS_PQUOTA_ACCT; - if (uflags & FS_QUOTA_GDQ_ACCT) - flags |= XFS_GQUOTA_ACCT; - if (uflags & FS_QUOTA_UDQ_ENFD) - flags |= XFS_UQUOTA_ENFD; - if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD)) - flags |= XFS_OQUOTA_ENFD; - - switch (op) { - case Q_XQUOTAON: - return -xfs_qm_scall_quotaon(mp, flags); - case Q_XQUOTAOFF: - if (!XFS_IS_QUOTA_ON(mp)) - return -EINVAL; - return -xfs_qm_scall_quotaoff(mp, flags); - case Q_XQUOTARM: - if (XFS_IS_QUOTA_ON(mp)) - return -EINVAL; - return -xfs_qm_scall_trunc_qfiles(mp, flags); - } - - return -EINVAL; -} - -STATIC int -xfs_fs_get_dqblk( - struct super_block *sb, - int type, - qid_t id, - struct fs_disk_quota *fdq) -{ - struct xfs_mount *mp = XFS_M(sb); - - if (!XFS_IS_QUOTA_RUNNING(mp)) - return -ENOSYS; - if (!XFS_IS_QUOTA_ON(mp)) - return -ESRCH; - - return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq); -} - -STATIC int 
-xfs_fs_set_dqblk( - struct super_block *sb, - int type, - qid_t id, - struct fs_disk_quota *fdq) -{ - struct xfs_mount *mp = XFS_M(sb); - - if (sb->s_flags & MS_RDONLY) - return -EROFS; - if (!XFS_IS_QUOTA_RUNNING(mp)) - return -ENOSYS; - if (!XFS_IS_QUOTA_ON(mp)) - return -ESRCH; - - return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); -} - -const struct quotactl_ops xfs_quotactl_operations = { - .get_xstate = xfs_fs_get_xstate, - .set_xstate = xfs_fs_set_xstate, - .get_dqblk = xfs_fs_get_dqblk, - .set_dqblk = xfs_fs_set_dqblk, -}; diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c deleted file mode 100644 index 76fdc58..0000000 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include - -DEFINE_PER_CPU(struct xfsstats, xfsstats); - -static int xfs_stat_proc_show(struct seq_file *m, void *v) -{ - int c, i, j, val; - __uint64_t xs_xstrat_bytes = 0; - __uint64_t xs_write_bytes = 0; - __uint64_t xs_read_bytes = 0; - - static const struct xstats_entry { - char *desc; - int endpoint; - } xstats[] = { - { "extent_alloc", XFSSTAT_END_EXTENT_ALLOC }, - { "abt", XFSSTAT_END_ALLOC_BTREE }, - { "blk_map", XFSSTAT_END_BLOCK_MAPPING }, - { "bmbt", XFSSTAT_END_BLOCK_MAP_BTREE }, - { "dir", XFSSTAT_END_DIRECTORY_OPS }, - { "trans", XFSSTAT_END_TRANSACTIONS }, - { "ig", XFSSTAT_END_INODE_OPS }, - { "log", XFSSTAT_END_LOG_OPS }, - { "push_ail", XFSSTAT_END_TAIL_PUSHING }, - { "xstrat", XFSSTAT_END_WRITE_CONVERT }, - { "rw", XFSSTAT_END_READ_WRITE_OPS }, - { "attr", XFSSTAT_END_ATTRIBUTE_OPS }, - { "icluster", XFSSTAT_END_INODE_CLUSTER }, - { "vnodes", XFSSTAT_END_VNODE_OPS }, - { "buf", XFSSTAT_END_BUF }, - { "abtb2", XFSSTAT_END_ABTB_V2 }, - { "abtc2", XFSSTAT_END_ABTC_V2 }, - { "bmbt2", XFSSTAT_END_BMBT_V2 }, - { "ibt2", XFSSTAT_END_IBT_V2 }, - }; - - /* Loop over all stats groups */ - for (i=j = 0; i < ARRAY_SIZE(xstats); i++) { - seq_printf(m, "%s", xstats[i].desc); - /* inner loop does each group */ - while (j < xstats[i].endpoint) { - val = 0; - /* sum over all cpus */ - for_each_possible_cpu(c) - val += *(((__u32*)&per_cpu(xfsstats, c) + j)); - seq_printf(m, " %u", val); - j++; - } - seq_putc(m, '\n'); - } - /* extra precision counters */ - for_each_possible_cpu(i) { - xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; - xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; - xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; - } - - seq_printf(m, "xpc %Lu %Lu %Lu\n", - xs_xstrat_bytes, xs_write_bytes, xs_read_bytes); - seq_printf(m, "debug %u\n", -#if defined(DEBUG) - 1); -#else - 0); -#endif - return 0; -} - -static int xfs_stat_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, xfs_stat_proc_show, NULL); -} - -static const struct file_operations 
xfs_stat_proc_fops = { - .owner = THIS_MODULE, - .open = xfs_stat_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -int -xfs_init_procfs(void) -{ - if (!proc_mkdir("fs/xfs", NULL)) - goto out; - - if (!proc_create("fs/xfs/stat", 0, NULL, - &xfs_stat_proc_fops)) - goto out_remove_entry; - return 0; - - out_remove_entry: - remove_proc_entry("fs/xfs", NULL); - out: - return -ENOMEM; -} - -void -xfs_cleanup_procfs(void) -{ - remove_proc_entry("fs/xfs/stat", NULL); - remove_proc_entry("fs/xfs", NULL); -} diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h deleted file mode 100644 index 736854b..0000000 --- a/fs/xfs/linux-2.6/xfs_stats.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright (c) 2000,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_STATS_H__ -#define __XFS_STATS_H__ - - -#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) - -#include - -/* - * XFS global statistics - */ -struct xfsstats { -# define XFSSTAT_END_EXTENT_ALLOC 4 - __uint32_t xs_allocx; - __uint32_t xs_allocb; - __uint32_t xs_freex; - __uint32_t xs_freeb; -# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4) - __uint32_t xs_abt_lookup; - __uint32_t xs_abt_compare; - __uint32_t xs_abt_insrec; - __uint32_t xs_abt_delrec; -# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7) - __uint32_t xs_blk_mapr; - __uint32_t xs_blk_mapw; - __uint32_t xs_blk_unmap; - __uint32_t xs_add_exlist; - __uint32_t xs_del_exlist; - __uint32_t xs_look_exlist; - __uint32_t xs_cmp_exlist; -# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4) - __uint32_t xs_bmbt_lookup; - __uint32_t xs_bmbt_compare; - __uint32_t xs_bmbt_insrec; - __uint32_t xs_bmbt_delrec; -# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4) - __uint32_t xs_dir_lookup; - __uint32_t xs_dir_create; - __uint32_t xs_dir_remove; - __uint32_t xs_dir_getdents; -# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3) - __uint32_t xs_trans_sync; - __uint32_t xs_trans_async; - __uint32_t xs_trans_empty; -# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7) - __uint32_t xs_ig_attempts; - __uint32_t xs_ig_found; - __uint32_t xs_ig_frecycle; - __uint32_t xs_ig_missed; - __uint32_t xs_ig_dup; - __uint32_t xs_ig_reclaims; - __uint32_t xs_ig_attrchg; -# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5) - __uint32_t xs_log_writes; - __uint32_t xs_log_blocks; - __uint32_t xs_log_noiclogs; - __uint32_t xs_log_force; - __uint32_t xs_log_force_sleep; -# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10) - __uint32_t xs_try_logspace; - __uint32_t xs_sleep_logspace; - __uint32_t xs_push_ail; - __uint32_t xs_push_ail_success; - __uint32_t xs_push_ail_pushbuf; - __uint32_t xs_push_ail_pinned; - __uint32_t xs_push_ail_locked; - __uint32_t xs_push_ail_flushing; - __uint32_t xs_push_ail_restarts; - __uint32_t 
xs_push_ail_flush; -# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2) - __uint32_t xs_xstrat_quick; - __uint32_t xs_xstrat_split; -# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2) - __uint32_t xs_write_calls; - __uint32_t xs_read_calls; -# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4) - __uint32_t xs_attr_get; - __uint32_t xs_attr_set; - __uint32_t xs_attr_remove; - __uint32_t xs_attr_list; -# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3) - __uint32_t xs_iflush_count; - __uint32_t xs_icluster_flushcnt; - __uint32_t xs_icluster_flushinode; -# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8) - __uint32_t vn_active; /* # vnodes not on free lists */ - __uint32_t vn_alloc; /* # times vn_alloc called */ - __uint32_t vn_get; /* # times vn_get called */ - __uint32_t vn_hold; /* # times vn_hold called */ - __uint32_t vn_rele; /* # times vn_rele called */ - __uint32_t vn_reclaim; /* # times vn_reclaim called */ - __uint32_t vn_remove; /* # times vn_remove called */ - __uint32_t vn_free; /* # times vn_free called */ -#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9) - __uint32_t xb_get; - __uint32_t xb_create; - __uint32_t xb_get_locked; - __uint32_t xb_get_locked_waited; - __uint32_t xb_busy_locked; - __uint32_t xb_miss_locked; - __uint32_t xb_page_retries; - __uint32_t xb_page_found; - __uint32_t xb_get_read; -/* Version 2 btree counters */ -#define XFSSTAT_END_ABTB_V2 (XFSSTAT_END_BUF+15) - __uint32_t xs_abtb_2_lookup; - __uint32_t xs_abtb_2_compare; - __uint32_t xs_abtb_2_insrec; - __uint32_t xs_abtb_2_delrec; - __uint32_t xs_abtb_2_newroot; - __uint32_t xs_abtb_2_killroot; - __uint32_t xs_abtb_2_increment; - __uint32_t xs_abtb_2_decrement; - __uint32_t xs_abtb_2_lshift; - __uint32_t xs_abtb_2_rshift; - __uint32_t xs_abtb_2_split; - __uint32_t xs_abtb_2_join; - __uint32_t xs_abtb_2_alloc; - __uint32_t xs_abtb_2_free; - __uint32_t xs_abtb_2_moves; -#define XFSSTAT_END_ABTC_V2 (XFSSTAT_END_ABTB_V2+15) - __uint32_t xs_abtc_2_lookup; - __uint32_t xs_abtc_2_compare; - __uint32_t xs_abtc_2_insrec; - __uint32_t xs_abtc_2_delrec; - __uint32_t xs_abtc_2_newroot; - __uint32_t xs_abtc_2_killroot; - __uint32_t xs_abtc_2_increment; - __uint32_t xs_abtc_2_decrement; - __uint32_t xs_abtc_2_lshift; - __uint32_t xs_abtc_2_rshift; - __uint32_t xs_abtc_2_split; - __uint32_t xs_abtc_2_join; - __uint32_t xs_abtc_2_alloc; - __uint32_t xs_abtc_2_free; - __uint32_t xs_abtc_2_moves; -#define XFSSTAT_END_BMBT_V2 (XFSSTAT_END_ABTC_V2+15) - __uint32_t xs_bmbt_2_lookup; - __uint32_t xs_bmbt_2_compare; - __uint32_t xs_bmbt_2_insrec; - __uint32_t xs_bmbt_2_delrec; - __uint32_t xs_bmbt_2_newroot; - __uint32_t xs_bmbt_2_killroot; - __uint32_t xs_bmbt_2_increment; - __uint32_t xs_bmbt_2_decrement; - __uint32_t xs_bmbt_2_lshift; - __uint32_t xs_bmbt_2_rshift; - __uint32_t xs_bmbt_2_split; - __uint32_t xs_bmbt_2_join; - __uint32_t xs_bmbt_2_alloc; - __uint32_t xs_bmbt_2_free; - __uint32_t xs_bmbt_2_moves; -#define XFSSTAT_END_IBT_V2 (XFSSTAT_END_BMBT_V2+15) - __uint32_t xs_ibt_2_lookup; - __uint32_t xs_ibt_2_compare; - __uint32_t xs_ibt_2_insrec; - __uint32_t xs_ibt_2_delrec; - __uint32_t xs_ibt_2_newroot; - __uint32_t xs_ibt_2_killroot; - __uint32_t xs_ibt_2_increment; - __uint32_t xs_ibt_2_decrement; - __uint32_t xs_ibt_2_lshift; - __uint32_t xs_ibt_2_rshift; - __uint32_t xs_ibt_2_split; - __uint32_t xs_ibt_2_join; - __uint32_t xs_ibt_2_alloc; - __uint32_t xs_ibt_2_free; - __uint32_t xs_ibt_2_moves; -/* Extra precision counters */ - __uint64_t 
xs_xstrat_bytes; - __uint64_t xs_write_bytes; - __uint64_t xs_read_bytes; -}; - -DECLARE_PER_CPU(struct xfsstats, xfsstats); - -/* - * We don't disable preempt, not too worried about poking the - * wrong CPU's stat for now (also aggregated before reporting). - */ -#define XFS_STATS_INC(v) (per_cpu(xfsstats, current_cpu()).v++) -#define XFS_STATS_DEC(v) (per_cpu(xfsstats, current_cpu()).v--) -#define XFS_STATS_ADD(v, inc) (per_cpu(xfsstats, current_cpu()).v += (inc)) - -extern int xfs_init_procfs(void); -extern void xfs_cleanup_procfs(void); - - -#else /* !CONFIG_PROC_FS */ - -# define XFS_STATS_INC(count) -# define XFS_STATS_DEC(count) -# define XFS_STATS_ADD(count, inc) - -static inline int xfs_init_procfs(void) -{ - return 0; -} - -static inline void xfs_cleanup_procfs(void) -{ -} - -#endif /* !CONFIG_PROC_FS */ - -#endif /* __XFS_STATS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c deleted file mode 100644 index 9a72dda..0000000 --- a/fs/xfs/linux-2.6/xfs_super.c +++ /dev/null @@ -1,1773 +0,0 @@ -/* - * Copyright (c) 2000-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "xfs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_dir2.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_alloc_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_btree.h" -#include "xfs_ialloc.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_itable.h" -#include "xfs_fsops.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_utils.h" -#include "xfs_vnodeops.h" -#include "xfs_log_priv.h" -#include "xfs_trans_priv.h" -#include "xfs_filestream.h" -#include "xfs_da_btree.h" -#include "xfs_extfree_item.h" -#include "xfs_mru_cache.h" -#include "xfs_inode_item.h" -#include "xfs_sync.h" -#include "xfs_trace.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static const struct super_operations xfs_super_operations; -static kmem_zone_t *xfs_ioend_zone; -mempool_t *xfs_ioend_pool; - -#define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ -#define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ -#define MNTOPT_LOGDEV "logdev" /* log device */ -#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ -#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ -#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ -#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ -#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ -#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ -#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ 
-#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ -#define MNTOPT_MTPT "mtpt" /* filesystem mount point */ -#define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ -#define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ -#define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ -#define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ -#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ -#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ -#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and - * unwritten extent conversion */ -#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ -#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ -#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ -#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ -#define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ -#define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes - * in stat(). */ -#define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ -#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ -#define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ -#define MNTOPT_QUOTA "quota" /* disk quotas (user) */ -#define MNTOPT_NOQUOTA "noquota" /* no quotas */ -#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ -#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ -#define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ -#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ -#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ -#define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ -#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ -#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ -#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ -#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ -#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ -#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ -#define MNTOPT_DISCARD "discard" /* Discard unused blocks */ -#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ - -/* - * Table driven mount option parser. - * - * Currently only used for remount, but it will be used for mount - * in the future, too. - */ -enum { - Opt_barrier, Opt_nobarrier, Opt_err -}; - -static const match_table_t tokens = { - {Opt_barrier, "barrier"}, - {Opt_nobarrier, "nobarrier"}, - {Opt_err, NULL} -}; - - -STATIC unsigned long -suffix_strtoul(char *s, char **endp, unsigned int base) -{ - int last, shift_left_factor = 0; - char *value = s; - - last = strlen(value) - 1; - if (value[last] == 'K' || value[last] == 'k') { - shift_left_factor = 10; - value[last] = '\0'; - } - if (value[last] == 'M' || value[last] == 'm') { - shift_left_factor = 20; - value[last] = '\0'; - } - if (value[last] == 'G' || value[last] == 'g') { - shift_left_factor = 30; - value[last] = '\0'; - } - - return simple_strtoul((const char *)s, endp, base) << shift_left_factor; -} - -/* - * This function fills in xfs_mount_t fields based on mount args. - * Note: the superblock has _not_ yet been read in. - * - * Note that this function leaks the various device name allocations on - * failure. The caller takes care of them. 
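The suffix_strtoul() helper above accepts "64k"/"32m"/"1g" style values for options like logbsize and allocsize. A userspace rendition of the same parsing, shifting after the conversion rather than truncating the suffix in place:

/* Parse "64k", "32m", "1g" style option values into byte counts. */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long suffix_strtoul(const char *s)
{
        char *end;
        unsigned long val = strtoul(s, &end, 10);

        switch (tolower((unsigned char)*end)) {
        case 'k': return val << 10;
        case 'm': return val << 20;
        case 'g': return val << 30;
        default:  return val;
        }
}

int main(void)
{
        printf("%lu %lu %lu\n", suffix_strtoul("64k"),
               suffix_strtoul("32m"), suffix_strtoul("8192"));
        /* 65536 33554432 8192 */
        return 0;
}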
- */ -STATIC int -xfs_parseargs( - struct xfs_mount *mp, - char *options) -{ - struct super_block *sb = mp->m_super; - char *this_char, *value, *eov; - int dsunit = 0; - int dswidth = 0; - int iosize = 0; - __uint8_t iosizelog = 0; - - /* - * set up the mount name first so all the errors will refer to the - * correct device. - */ - mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); - if (!mp->m_fsname) - return ENOMEM; - mp->m_fsname_len = strlen(mp->m_fsname) + 1; - - /* - * Copy binary VFS mount flags we are interested in. - */ - if (sb->s_flags & MS_RDONLY) - mp->m_flags |= XFS_MOUNT_RDONLY; - if (sb->s_flags & MS_DIRSYNC) - mp->m_flags |= XFS_MOUNT_DIRSYNC; - if (sb->s_flags & MS_SYNCHRONOUS) - mp->m_flags |= XFS_MOUNT_WSYNC; - - /* - * Set some default flags that could be cleared by the mount option - * parsing. - */ - mp->m_flags |= XFS_MOUNT_BARRIER; - mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; - mp->m_flags |= XFS_MOUNT_DELAYLOG; - - /* - * These can be overridden by the mount option parsing. - */ - mp->m_logbufs = -1; - mp->m_logbsize = -1; - - if (!options) - goto done; - - while ((this_char = strsep(&options, ",")) != NULL) { - if (!*this_char) - continue; - if ((value = strchr(this_char, '=')) != NULL) - *value++ = 0; - - if (!strcmp(this_char, MNTOPT_LOGBUFS)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - mp->m_logbufs = simple_strtoul(value, &eov, 10); - } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - mp->m_logbsize = suffix_strtoul(value, &eov, 10); - } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); - if (!mp->m_logname) - return ENOMEM; - } else if (!strcmp(this_char, MNTOPT_MTPT)) { - xfs_warn(mp, "%s option not allowed on this system", - this_char); - return EINVAL; - } else if (!strcmp(this_char, MNTOPT_RTDEV)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); - if (!mp->m_rtname) - return ENOMEM; - } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - iosize = simple_strtoul(value, &eov, 10); - iosizelog = ffs(iosize) - 1; - } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - iosize = suffix_strtoul(value, &eov, 10); - iosizelog = ffs(iosize) - 1; - } else if (!strcmp(this_char, MNTOPT_GRPID) || - !strcmp(this_char, MNTOPT_BSDGROUPS)) { - mp->m_flags |= XFS_MOUNT_GRPID; - } else if (!strcmp(this_char, MNTOPT_NOGRPID) || - !strcmp(this_char, MNTOPT_SYSVGROUPS)) { - mp->m_flags &= ~XFS_MOUNT_GRPID; - } else if (!strcmp(this_char, MNTOPT_WSYNC)) { - mp->m_flags |= XFS_MOUNT_WSYNC; - } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { - mp->m_flags |= XFS_MOUNT_NORECOVERY; - } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { - mp->m_flags |= XFS_MOUNT_NOALIGN; - } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { - mp->m_flags |= XFS_MOUNT_SWALLOC; - } else if (!strcmp(this_char, MNTOPT_SUNIT)) { - if (!value || !*value) { - xfs_warn(mp, "%s option 
requires an argument", - this_char); - return EINVAL; - } - dsunit = simple_strtoul(value, &eov, 10); - } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { - if (!value || !*value) { - xfs_warn(mp, "%s option requires an argument", - this_char); - return EINVAL; - } - dswidth = simple_strtoul(value, &eov, 10); - } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { - mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; -#if !XFS_BIG_INUMS - xfs_warn(mp, "%s option not allowed on this system", - this_char); - return EINVAL; -#endif - } else if (!strcmp(this_char, MNTOPT_NOUUID)) { - mp->m_flags |= XFS_MOUNT_NOUUID; - } else if (!strcmp(this_char, MNTOPT_BARRIER)) { - mp->m_flags |= XFS_MOUNT_BARRIER; - } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { - mp->m_flags &= ~XFS_MOUNT_BARRIER; - } else if (!strcmp(this_char, MNTOPT_IKEEP)) { - mp->m_flags |= XFS_MOUNT_IKEEP; - } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { - mp->m_flags &= ~XFS_MOUNT_IKEEP; - } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { - mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; - } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { - mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; - } else if (!strcmp(this_char, MNTOPT_ATTR2)) { - mp->m_flags |= XFS_MOUNT_ATTR2; - } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { - mp->m_flags &= ~XFS_MOUNT_ATTR2; - mp->m_flags |= XFS_MOUNT_NOATTR2; - } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { - mp->m_flags |= XFS_MOUNT_FILESTREAMS; - } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { - mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | - XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | - XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | - XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); - } else if (!strcmp(this_char, MNTOPT_QUOTA) || - !strcmp(this_char, MNTOPT_UQUOTA) || - !strcmp(this_char, MNTOPT_USRQUOTA)) { - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | - XFS_UQUOTA_ENFD); - } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || - !strcmp(this_char, MNTOPT_UQUOTANOENF)) { - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_UQUOTA_ENFD; - } else if (!strcmp(this_char, MNTOPT_PQUOTA) || - !strcmp(this_char, MNTOPT_PRJQUOTA)) { - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | - XFS_OQUOTA_ENFD); - } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_OQUOTA_ENFD; - } else if (!strcmp(this_char, MNTOPT_GQUOTA) || - !strcmp(this_char, MNTOPT_GRPQUOTA)) { - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | - XFS_OQUOTA_ENFD); - } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); - mp->m_qflags &= ~XFS_OQUOTA_ENFD; - } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { - mp->m_flags |= XFS_MOUNT_DELAYLOG; - } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { - mp->m_flags &= ~XFS_MOUNT_DELAYLOG; - } else if (!strcmp(this_char, MNTOPT_DISCARD)) { - mp->m_flags |= XFS_MOUNT_DISCARD; - } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { - mp->m_flags &= ~XFS_MOUNT_DISCARD; - } else if (!strcmp(this_char, "ihashsize")) { - xfs_warn(mp, - "ihashsize no longer used, option is deprecated."); - } else if (!strcmp(this_char, "osyncisdsync")) { - xfs_warn(mp, - "osyncisdsync has no effect, option is deprecated."); - } else if (!strcmp(this_char, "osyncisosync")) { - xfs_warn(mp, - "osyncisosync has no effect, option is deprecated."); - } else if (!strcmp(this_char, "irixsgid")) { - xfs_warn(mp, - "irixsgid is now a sysctl(2) variable, option is deprecated."); - } else { - 
xfs_warn(mp, "unknown mount option [%s].", this_char); - return EINVAL; - } - } - - /* - * no recovery flag requires a read-only mount - */ - if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && - !(mp->m_flags & XFS_MOUNT_RDONLY)) { - xfs_warn(mp, "no-recovery mounts must be read-only."); - return EINVAL; - } - - if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { - xfs_warn(mp, - "sunit and swidth options incompatible with the noalign option"); - return EINVAL; - } - - if ((mp->m_flags & XFS_MOUNT_DISCARD) && - !(mp->m_flags & XFS_MOUNT_DELAYLOG)) { - xfs_warn(mp, - "the discard option is incompatible with the nodelaylog option"); - return EINVAL; - } - -#ifndef CONFIG_XFS_QUOTA - if (XFS_IS_QUOTA_RUNNING(mp)) { - xfs_warn(mp, "quota support not available in this kernel."); - return EINVAL; - } -#endif - - if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && - (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { - xfs_warn(mp, "cannot mount with both project and group quota"); - return EINVAL; - } - - if ((dsunit && !dswidth) || (!dsunit && dswidth)) { - xfs_warn(mp, "sunit and swidth must be specified together"); - return EINVAL; - } - - if (dsunit && (dswidth % dsunit != 0)) { - xfs_warn(mp, - "stripe width (%d) must be a multiple of the stripe unit (%d)", - dswidth, dsunit); - return EINVAL; - } - -done: - if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { - /* - * At this point the superblock has not been read - * in, therefore we do not know the block size. - * Before the mount call ends we will convert - * these to FSBs. - */ - if (dsunit) { - mp->m_dalign = dsunit; - mp->m_flags |= XFS_MOUNT_RETERR; - } - - if (dswidth) - mp->m_swidth = dswidth; - } - - if (mp->m_logbufs != -1 && - mp->m_logbufs != 0 && - (mp->m_logbufs < XLOG_MIN_ICLOGS || - mp->m_logbufs > XLOG_MAX_ICLOGS)) { - xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", - mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); - return XFS_ERROR(EINVAL); - } - if (mp->m_logbsize != -1 && - mp->m_logbsize != 0 && - (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || - mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || - !is_power_of_2(mp->m_logbsize))) { - xfs_warn(mp, - "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", - mp->m_logbsize); - return XFS_ERROR(EINVAL); - } - - if (iosizelog) { - if (iosizelog > XFS_MAX_IO_LOG || - iosizelog < XFS_MIN_IO_LOG) { - xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", - iosizelog, XFS_MIN_IO_LOG, - XFS_MAX_IO_LOG); - return XFS_ERROR(EINVAL); - } - - mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; - mp->m_readio_log = iosizelog; - mp->m_writeio_log = iosizelog; - } - - return 0; -} - -struct proc_xfs_info { - int flag; - char *str; -}; - -STATIC int -xfs_showargs( - struct xfs_mount *mp, - struct seq_file *m) -{ - static struct proc_xfs_info xfs_info_set[] = { - /* the few simple ones we can get from the mount struct */ - { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP }, - { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, - { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, - { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, - { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, - { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, - { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, - { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, - { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, - { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, - { XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD }, - { 0, NULL } - }; - static struct proc_xfs_info xfs_info_unset[] = { - /* the few simple ones we can get from the mount struct */ - { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, - { 
XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, - { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, - { 0, NULL } - }; - struct proc_xfs_info *xfs_infop; - - for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { - if (mp->m_flags & xfs_infop->flag) - seq_puts(m, xfs_infop->str); - } - for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) { - if (!(mp->m_flags & xfs_infop->flag)) - seq_puts(m, xfs_infop->str); - } - - if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) - seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", - (int)(1 << mp->m_writeio_log) >> 10); - - if (mp->m_logbufs > 0) - seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); - if (mp->m_logbsize > 0) - seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); - - if (mp->m_logname) - seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); - if (mp->m_rtname) - seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); - - if (mp->m_dalign > 0) - seq_printf(m, "," MNTOPT_SUNIT "=%d", - (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); - if (mp->m_swidth > 0) - seq_printf(m, "," MNTOPT_SWIDTH "=%d", - (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); - - if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) - seq_puts(m, "," MNTOPT_USRQUOTA); - else if (mp->m_qflags & XFS_UQUOTA_ACCT) - seq_puts(m, "," MNTOPT_UQUOTANOENF); - - /* Either project or group quotas can be active, not both */ - - if (mp->m_qflags & XFS_PQUOTA_ACCT) { - if (mp->m_qflags & XFS_OQUOTA_ENFD) - seq_puts(m, "," MNTOPT_PRJQUOTA); - else - seq_puts(m, "," MNTOPT_PQUOTANOENF); - } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { - if (mp->m_qflags & XFS_OQUOTA_ENFD) - seq_puts(m, "," MNTOPT_GRPQUOTA); - else - seq_puts(m, "," MNTOPT_GQUOTANOENF); - } - - if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) - seq_puts(m, "," MNTOPT_NOQUOTA); - - return 0; -} -__uint64_t -xfs_max_file_offset( - unsigned int blockshift) -{ - unsigned int pagefactor = 1; - unsigned int bitshift = BITS_PER_LONG - 1; - - /* Figure out maximum filesize, on Linux this can depend on - * the filesystem blocksize (on 32 bit platforms). - * __block_write_begin does this in an [unsigned] long... - * page->index << (PAGE_CACHE_SHIFT - bbits) - * So, for page sized blocks (4K on 32 bit platforms), - * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is - * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) - * but for smaller blocksizes it is less (bbits = log2 bsize). - * Note1: get_block_t takes a long (implicit cast from above) - * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch - * can optionally convert the [unsigned] long from above into - * an [unsigned] long long. 
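Plugging numbers into the limit described above: on a 32-bit kernel without CONFIG_LBDAF, 4k blocks on 4k pages give pagefactor = PAGE_CACHE_SIZE and bitshift = BITS_PER_LONG - 1, i.e. an 8 TiB - 1 maximum offset. A trivial evaluation sketch of that arithmetic:

/* Evaluate the 32-bit page-cache offset limit for 4k blocks/pages. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const unsigned int page_shift = 12;     /* 4k pages */
        const unsigned int bits_per_long = 32;  /* 32-bit kernel */
        const unsigned int blockshift = 12;     /* 4k blocks */

        uint64_t pagefactor = (1u << page_shift) >> (page_shift - blockshift);
        unsigned int bitshift = bits_per_long - 1;

        printf("max offset = %llu\n",           /* 8796093022207 = 8TiB-1 */
               (unsigned long long)((pagefactor << bitshift) - 1));
        return 0;
}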
- */ - -#if BITS_PER_LONG == 32 -# if defined(CONFIG_LBDAF) - ASSERT(sizeof(sector_t) == 8); - pagefactor = PAGE_CACHE_SIZE; - bitshift = BITS_PER_LONG; -# else - pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); -# endif -#endif - - return (((__uint64_t)pagefactor) << bitshift) - 1; -} - -STATIC int -xfs_blkdev_get( - xfs_mount_t *mp, - const char *name, - struct block_device **bdevp) -{ - int error = 0; - - *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, - mp); - if (IS_ERR(*bdevp)) { - error = PTR_ERR(*bdevp); - xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); - } - - return -error; -} - -STATIC void -xfs_blkdev_put( - struct block_device *bdev) -{ - if (bdev) - blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); -} - -void -xfs_blkdev_issue_flush( - xfs_buftarg_t *buftarg) -{ - blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL); -} - -STATIC void -xfs_close_devices( - struct xfs_mount *mp) -{ - if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { - struct block_device *logdev = mp->m_logdev_targp->bt_bdev; - xfs_free_buftarg(mp, mp->m_logdev_targp); - xfs_blkdev_put(logdev); - } - if (mp->m_rtdev_targp) { - struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; - xfs_free_buftarg(mp, mp->m_rtdev_targp); - xfs_blkdev_put(rtdev); - } - xfs_free_buftarg(mp, mp->m_ddev_targp); -} - -/* - * The file system configurations are: - * (1) device (partition) with data and internal log - * (2) logical volume with data and log subvolumes. - * (3) logical volume with data, log, and realtime subvolumes. - * - * We only have to handle opening the log and realtime volumes here if - * they are present. The data subvolume has already been opened by - * get_sb_bdev() and is stored in sb->s_bdev. - */ -STATIC int -xfs_open_devices( - struct xfs_mount *mp) -{ - struct block_device *ddev = mp->m_super->s_bdev; - struct block_device *logdev = NULL, *rtdev = NULL; - int error; - - /* - * Open real time and log devices - order is important. 
- */ - if (mp->m_logname) { - error = xfs_blkdev_get(mp, mp->m_logname, &logdev); - if (error) - goto out; - } - - if (mp->m_rtname) { - error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); - if (error) - goto out_close_logdev; - - if (rtdev == ddev || rtdev == logdev) { - xfs_warn(mp, - "Cannot mount filesystem with identical rtdev and ddev/logdev."); - error = EINVAL; - goto out_close_rtdev; - } - } - - /* - * Setup xfs_mount buffer target pointers - */ - error = ENOMEM; - mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname); - if (!mp->m_ddev_targp) - goto out_close_rtdev; - - if (rtdev) { - mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1, - mp->m_fsname); - if (!mp->m_rtdev_targp) - goto out_free_ddev_targ; - } - - if (logdev && logdev != ddev) { - mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1, - mp->m_fsname); - if (!mp->m_logdev_targp) - goto out_free_rtdev_targ; - } else { - mp->m_logdev_targp = mp->m_ddev_targp; - } - - return 0; - - out_free_rtdev_targ: - if (mp->m_rtdev_targp) - xfs_free_buftarg(mp, mp->m_rtdev_targp); - out_free_ddev_targ: - xfs_free_buftarg(mp, mp->m_ddev_targp); - out_close_rtdev: - if (rtdev) - xfs_blkdev_put(rtdev); - out_close_logdev: - if (logdev && logdev != ddev) - xfs_blkdev_put(logdev); - out: - return error; -} - -/* - * Setup xfs_mount buffer target pointers based on superblock - */ -STATIC int -xfs_setup_devices( - struct xfs_mount *mp) -{ - int error; - - error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, - mp->m_sb.sb_sectsize); - if (error) - return error; - - if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { - unsigned int log_sector_size = BBSIZE; - - if (xfs_sb_version_hassector(&mp->m_sb)) - log_sector_size = mp->m_sb.sb_logsectsize; - error = xfs_setsize_buftarg(mp->m_logdev_targp, - mp->m_sb.sb_blocksize, - log_sector_size); - if (error) - return error; - } - if (mp->m_rtdev_targp) { - error = xfs_setsize_buftarg(mp->m_rtdev_targp, - mp->m_sb.sb_blocksize, - mp->m_sb.sb_sectsize); - if (error) - return error; - } - - return 0; -} - -/* Catch misguided souls that try to use this interface on XFS */ -STATIC struct inode * -xfs_fs_alloc_inode( - struct super_block *sb) -{ - BUG(); - return NULL; -} - -/* - * Now that the generic code is guaranteed not to be accessing - * the linux inode, we can reclaim the inode. - */ -STATIC void -xfs_fs_destroy_inode( - struct inode *inode) -{ - struct xfs_inode *ip = XFS_I(inode); - - trace_xfs_destroy_inode(ip); - - XFS_STATS_INC(vn_reclaim); - - /* bad inode, get out here ASAP */ - if (is_bad_inode(inode)) - goto out_reclaim; - - xfs_ioend_wait(ip); - - ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); - - /* - * We should never get here with one of the reclaim flags already set. - */ - ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); - ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM)); - - /* - * We always use background reclaim here because even if the - * inode is clean, it still may be under IO and hence we have - * to take the flush lock. The background reclaim path handles - * this more efficiently than we can here, so simply let background - * reclaim tear down all inodes. - */ -out_reclaim: - xfs_inode_set_reclaim_tag(ip); -} - -/* - * Slab object creation initialisation for the XFS inode. - * This covers only the idempotent fields in the XFS inode; - * all other fields need to be initialised on allocation - * from the slab. 
This avoids the need to repeatedly initialise - * fields in the xfs inode that are left in the initialised state - * when freeing the inode. - */ -STATIC void -xfs_fs_inode_init_once( - void *inode) -{ - struct xfs_inode *ip = inode; - - memset(ip, 0, sizeof(struct xfs_inode)); - - /* vfs inode */ - inode_init_once(VFS_I(ip)); - - /* xfs inode */ - atomic_set(&ip->i_iocount, 0); - atomic_set(&ip->i_pincount, 0); - spin_lock_init(&ip->i_flags_lock); - init_waitqueue_head(&ip->i_ipin_wait); - /* - * Because we want to use a counting completion, complete - * the flush completion once to allow a single access to - * the flush completion without blocking. - */ - init_completion(&ip->i_flush); - complete(&ip->i_flush); - - mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, - "xfsino", ip->i_ino); -} - -/* - * Dirty the XFS inode when mark_inode_dirty_sync() is called so that - * we catch unlogged VFS level updates to the inode. - * - * We need the barrier() to maintain correct ordering between unlogged - * updates and the transaction commit code that clears the i_update_core - * field. This requires all updates to be completed before marking the - * inode dirty. - */ -STATIC void -xfs_fs_dirty_inode( - struct inode *inode, - int flags) -{ - barrier(); - XFS_I(inode)->i_update_core = 1; -} - -STATIC int -xfs_log_inode( - struct xfs_inode *ip) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - xfs_iunlock(ip, XFS_ILOCK_SHARED); - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - - if (error) { - xfs_trans_cancel(tp, 0); - /* we need to return with the lock held shared */ - xfs_ilock(ip, XFS_ILOCK_SHARED); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - - /* - * Note - it's possible that we might have pushed ourselves out of the - * way during trans_reserve which would flush the inode. But there's - * no guarantee that the inode buffer has actually gone out yet (it's - * delwri). Plus the buffer could be pinned anyway if it's part of - * an inode in another recent transaction. So we play it safe and - * fire off the transaction anyway. - */ - xfs_trans_ijoin(tp, ip); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_trans_commit(tp, 0); - xfs_ilock_demote(ip, XFS_ILOCK_EXCL); - - return error; -} - -STATIC int -xfs_fs_write_inode( - struct inode *inode, - struct writeback_control *wbc) -{ - struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - int error = EAGAIN; - - trace_xfs_write_inode(ip); - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - if (wbc->sync_mode == WB_SYNC_ALL) { - /* - * Make sure the inode has made it into the log. Instead - * of forcing it all the way to stable storage using a - * synchronous transaction we let the log force inside the - * ->sync_fs call do that for us, which reduces the number - * of synchronous log forces dramatically. - */ - xfs_ioend_wait(ip); - xfs_ilock(ip, XFS_ILOCK_SHARED); - if (ip->i_update_core) { - error = xfs_log_inode(ip); - if (error) - goto out_unlock; - } - } else { - /* - * We make this non-blocking if the inode is contended, returning - * EAGAIN to indicate to the caller that they did not succeed. - * This prevents the flush path from blocking on inodes inside - * another operation right now; they get caught later by - * xfs_sync.
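The non-blocking branch here follows a common writeback convention: take every lock with a trylock and hand back EAGAIN on contention, so the caller redirties the inode and retries on a later pass instead of stalling the whole flush. A minimal user-space analogue of that convention, with hypothetical names:

#include <errno.h>
#include <pthread.h>

struct object {
	pthread_mutex_t lock;	/* stands in for the inode ilock */
	int dirty;
};

/* Non-blocking flush: on contention report EAGAIN so the caller
 * redirties the object and retries on a later writeback pass. */
static int flush_object_nonblocking(struct object *obj)
{
	if (pthread_mutex_trylock(&obj->lock) != 0)
		return EAGAIN;
	if (obj->dirty) {
		/* ... write the object back here ... */
		obj->dirty = 0;
	}
	pthread_mutex_unlock(&obj->lock);
	return 0;
}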
- */ - if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) - goto out; - - if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) - goto out_unlock; - - /* - * Now that we have the flush lock and the inode is not pinned, we - * can check if the inode is really clean as we know that - * there are no pending transaction completions, it is not - * waiting on the delayed write queue and there is no IO in - * progress. - */ - if (xfs_inode_clean(ip)) { - xfs_ifunlock(ip); - error = 0; - goto out_unlock; - } - error = xfs_iflush(ip, SYNC_TRYLOCK); - } - - out_unlock: - xfs_iunlock(ip, XFS_ILOCK_SHARED); - out: - /* - * if we failed to write out the inode then mark - * it dirty again so we'll try again later. - */ - if (error) - xfs_mark_inode_dirty_sync(ip); - return -error; -} - -STATIC void -xfs_fs_evict_inode( - struct inode *inode) -{ - xfs_inode_t *ip = XFS_I(inode); - - trace_xfs_evict_inode(ip); - - truncate_inode_pages(&inode->i_data, 0); - end_writeback(inode); - XFS_STATS_INC(vn_rele); - XFS_STATS_INC(vn_remove); - XFS_STATS_DEC(vn_active); - - /* - * The iolock is used by the file system to coordinate reads, - * writes, and block truncates. Up to this point the lock - * protected concurrent accesses by users of the inode. But - * from here forward we're doing some final processing of the - * inode because we're done with it, and although we reuse the - * iolock for protection it is really a distinct lock class - * (in the lockdep sense) from before. To keep lockdep happy - * (and basically indicate what we are doing), we explicitly - * re-init the iolock here. - */ - ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); - mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); - lockdep_set_class_and_name(&ip->i_iolock.mr_lock, - &xfs_iolock_reclaimable, "xfs_iolock_reclaimable"); - - xfs_inactive(ip); -} - -STATIC void -xfs_free_fsname( - struct xfs_mount *mp) -{ - kfree(mp->m_fsname); - kfree(mp->m_rtname); - kfree(mp->m_logname); -} - -STATIC void -xfs_fs_put_super( - struct super_block *sb) -{ - struct xfs_mount *mp = XFS_M(sb); - - xfs_syncd_stop(mp); - - /* - * Blow away any referenced inode in the filestreams cache. - * This can and will cause log traffic as inodes go inactive - * here. - */ - xfs_filestream_unmount(mp); - - XFS_bflush(mp->m_ddev_targp); - - xfs_unmountfs(mp); - xfs_freesb(mp); - xfs_icsb_destroy_counters(mp); - xfs_close_devices(mp); - xfs_free_fsname(mp); - kfree(mp); -} - -STATIC int -xfs_fs_sync_fs( - struct super_block *sb, - int wait) -{ - struct xfs_mount *mp = XFS_M(sb); - int error; - - /* - * Not much we can do for the first async pass. Writing out the - * superblock would be counter-productive as we are going to redirty it - * when writing out other data and metadata (and writing out a single - * block is quite fast anyway). - * - * Try to asynchronously kick off quota syncing at least. - */ - if (!wait) { - xfs_qm_sync(mp, SYNC_TRYLOCK); - return 0; - } - - error = xfs_quiesce_data(mp); - if (error) - return -error; - - if (laptop_mode) { - /* - * The disk must be active because we're syncing. - * We schedule xfssyncd now (now that the disk is - * active) instead of later (when it might not be).
- */ - flush_delayed_work_sync(&mp->m_sync_work); - } - - return 0; -} - -STATIC int -xfs_fs_statfs( - struct dentry *dentry, - struct kstatfs *statp) -{ - struct xfs_mount *mp = XFS_M(dentry->d_sb); - xfs_sb_t *sbp = &mp->m_sb; - struct xfs_inode *ip = XFS_I(dentry->d_inode); - __uint64_t fakeinos, id; - xfs_extlen_t lsize; - __int64_t ffree; - - statp->f_type = XFS_SB_MAGIC; - statp->f_namelen = MAXNAMELEN - 1; - - id = huge_encode_dev(mp->m_ddev_targp->bt_dev); - statp->f_fsid.val[0] = (u32)id; - statp->f_fsid.val[1] = (u32)(id >> 32); - - xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); - - spin_lock(&mp->m_sb_lock); - statp->f_bsize = sbp->sb_blocksize; - lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; - statp->f_blocks = sbp->sb_dblocks - lsize; - statp->f_bfree = statp->f_bavail = - sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); - fakeinos = statp->f_bfree << sbp->sb_inopblog; - statp->f_files = - MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); - if (mp->m_maxicount) - statp->f_files = min_t(typeof(statp->f_files), - statp->f_files, - mp->m_maxicount); - - /* make sure statp->f_ffree does not underflow */ - ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); - statp->f_ffree = max_t(__int64_t, ffree, 0); - - spin_unlock(&mp->m_sb_lock); - - if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || - ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == - (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) - xfs_qm_statvfs(ip, statp); - return 0; -} - -STATIC void -xfs_save_resvblks(struct xfs_mount *mp) -{ - __uint64_t resblks = 0; - - mp->m_resblks_save = mp->m_resblks; - xfs_reserve_blocks(mp, &resblks, NULL); -} - -STATIC void -xfs_restore_resvblks(struct xfs_mount *mp) -{ - __uint64_t resblks; - - if (mp->m_resblks_save) { - resblks = mp->m_resblks_save; - mp->m_resblks_save = 0; - } else - resblks = xfs_default_resblks(mp); - - xfs_reserve_blocks(mp, &resblks, NULL); -} - -STATIC int -xfs_fs_remount( - struct super_block *sb, - int *flags, - char *options) -{ - struct xfs_mount *mp = XFS_M(sb); - substring_t args[MAX_OPT_ARGS]; - char *p; - int error; - - while ((p = strsep(&options, ",")) != NULL) { - int token; - - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_barrier: - mp->m_flags |= XFS_MOUNT_BARRIER; - break; - case Opt_nobarrier: - mp->m_flags &= ~XFS_MOUNT_BARRIER; - break; - default: - /* - * Logically we would return an error here to prevent - * users from believing they might have changed - * mount options using remount which can't be changed. - * - * But unfortunately mount(8) adds all options from - * mtab and fstab to the mount arguments in some cases - * so we can't blindly reject options, but have to - * check for each specified option if it actually - * differs from the currently set option and only - * reject it if that's the case. - * - * Until that is implemented we return success for - * every remount request, and silently ignore all - * options that we can't actually change. - */ -#if 0 - xfs_info(mp, - "mount option \"%s\" not supported for remount\n", p); - return -EINVAL; -#else - break; -#endif - } - } - - /* ro -> rw */ - if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { - mp->m_flags &= ~XFS_MOUNT_RDONLY; - - /* - * If this is the first remount to writeable state we - * might have some superblock changes to update. 
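The remount loop above only honours barrier/nobarrier and deliberately swallows everything else, for the mount(8)/mtab reason given in the comment. A user-space sketch of the same strsep()-driven loop (option names as in the code; parse_remount() itself is a hypothetical helper):

#define _DEFAULT_SOURCE		/* for strsep() */
#include <string.h>

/* barrier keeps its current value unless one of the two recognised
 * options changes it; everything else is ignored. */
static void parse_remount(char *options, int *barrier)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;	/* skip empty slots between commas */
		if (strcmp(p, "barrier") == 0)
			*barrier = 1;
		else if (strcmp(p, "nobarrier") == 0)
			*barrier = 0;
		/* silently accept anything else, per the comment above */
	}
}

Rejecting unknown options outright would break remounts whose option string was assembled from fstab and mtab, which is why the loop falls through silently instead.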
- */ - if (mp->m_update_flags) { - error = xfs_mount_log_sb(mp, mp->m_update_flags); - if (error) { - xfs_warn(mp, "failed to write sb changes"); - return error; - } - mp->m_update_flags = 0; - } - - /* - * Fill out the reserve pool if it is empty. Use the stashed - * value if it is non-zero, otherwise go with the default. - */ - xfs_restore_resvblks(mp); - } - - /* rw -> ro */ - if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { - /* - * After we have synced the data but before we sync the - * metadata, we need to free up the reserve block pool so that - * the used block count in the superblock on disk is correct at - * the end of the remount. Stash the current reserve pool size - * so that if we get remounted rw, we can return it to the same - * size. - */ - - xfs_quiesce_data(mp); - xfs_save_resvblks(mp); - xfs_quiesce_attr(mp); - mp->m_flags |= XFS_MOUNT_RDONLY; - } - - return 0; -} - -/* - * Second stage of a freeze. The data is already frozen so we only - * need to take care of the metadata. Once that's done write a dummy - * record to dirty the log in case of a crash while frozen. - */ -STATIC int -xfs_fs_freeze( - struct super_block *sb) -{ - struct xfs_mount *mp = XFS_M(sb); - - xfs_save_resvblks(mp); - xfs_quiesce_attr(mp); - return -xfs_fs_log_dummy(mp); -} - -STATIC int -xfs_fs_unfreeze( - struct super_block *sb) -{ - struct xfs_mount *mp = XFS_M(sb); - - xfs_restore_resvblks(mp); - return 0; -} - -STATIC int -xfs_fs_show_options( - struct seq_file *m, - struct vfsmount *mnt) -{ - return -xfs_showargs(XFS_M(mnt->mnt_sb), m); -} - -/* - * This function fills in xfs_mount_t fields based on mount args. - * Note: the superblock _has_ now been read in. - */ -STATIC int -xfs_finish_flags( - struct xfs_mount *mp) -{ - int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); - - /* Fail a mount where the logbuf is smaller than the log stripe */ - if (xfs_sb_version_haslogv2(&mp->m_sb)) { - if (mp->m_logbsize <= 0 && - mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { - mp->m_logbsize = mp->m_sb.sb_logsunit; - } else if (mp->m_logbsize > 0 && - mp->m_logbsize < mp->m_sb.sb_logsunit) { - xfs_warn(mp, - "logbuf size must be greater than or equal to log stripe size"); - return XFS_ERROR(EINVAL); - } - } else { - /* Fail a mount if the logbuf is larger than 32K */ - if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { - xfs_warn(mp, - "logbuf size for version 1 logs must be 16K or 32K"); - return XFS_ERROR(EINVAL); - } - } - - /* - * mkfs'ed attr2 will turn on attr2 mount unless explicitly - * told by noattr2 to turn it off - */ - if (xfs_sb_version_hasattr2(&mp->m_sb) && - !(mp->m_flags & XFS_MOUNT_NOATTR2)) - mp->m_flags |= XFS_MOUNT_ATTR2; - - /* - * prohibit r/w mounts of read-only filesystems - */ - if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { - xfs_warn(mp, - "cannot mount a read-only filesystem as read-write"); - return XFS_ERROR(EROFS); - } - - return 0; -} - -STATIC int -xfs_fs_fill_super( - struct super_block *sb, - void *data, - int silent) -{ - struct inode *root; - struct xfs_mount *mp = NULL; - int flags = 0, error = ENOMEM; - - mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); - if (!mp) - goto out; - - spin_lock_init(&mp->m_sb_lock); - mutex_init(&mp->m_growlock); - atomic_set(&mp->m_active_trans, 0); - - mp->m_super = sb; - sb->s_fs_info = mp; - - error = xfs_parseargs(mp, (char *)data); - if (error) - goto out_free_fsname; - - sb_min_blocksize(sb, BBSIZE); - sb->s_xattr = xfs_xattr_handlers; - sb->s_export_op = &xfs_export_operations; -#ifdef CONFIG_XFS_QUOTA - 
sb->s_qcop = &xfs_quotactl_operations; -#endif - sb->s_op = &xfs_super_operations; - - if (silent) - flags |= XFS_MFSI_QUIET; - - error = xfs_open_devices(mp); - if (error) - goto out_free_fsname; - - error = xfs_icsb_init_counters(mp); - if (error) - goto out_close_devices; - - error = xfs_readsb(mp, flags); - if (error) - goto out_destroy_counters; - - error = xfs_finish_flags(mp); - if (error) - goto out_free_sb; - - error = xfs_setup_devices(mp); - if (error) - goto out_free_sb; - - error = xfs_filestream_mount(mp); - if (error) - goto out_free_sb; - - /* - * we must configure the block size in the superblock before we run the - * full mount process as the mount process can lookup and cache inodes. - * For the same reason we must also initialise the syncd and register - * the inode cache shrinker so that inodes can be reclaimed during - * operations like a quotacheck that iterate all inodes in the - * filesystem. - */ - sb->s_magic = XFS_SB_MAGIC; - sb->s_blocksize = mp->m_sb.sb_blocksize; - sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; - sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); - sb->s_time_gran = 1; - set_posix_acl_flag(sb); - - error = xfs_mountfs(mp); - if (error) - goto out_filestream_unmount; - - error = xfs_syncd_init(mp); - if (error) - goto out_unmount; - - root = igrab(VFS_I(mp->m_rootip)); - if (!root) { - error = ENOENT; - goto out_syncd_stop; - } - if (is_bad_inode(root)) { - error = EINVAL; - goto out_syncd_stop; - } - sb->s_root = d_alloc_root(root); - if (!sb->s_root) { - error = ENOMEM; - goto out_iput; - } - - return 0; - - out_filestream_unmount: - xfs_filestream_unmount(mp); - out_free_sb: - xfs_freesb(mp); - out_destroy_counters: - xfs_icsb_destroy_counters(mp); - out_close_devices: - xfs_close_devices(mp); - out_free_fsname: - xfs_free_fsname(mp); - kfree(mp); - out: - return -error; - - out_iput: - iput(root); - out_syncd_stop: - xfs_syncd_stop(mp); - out_unmount: - /* - * Blow away any referenced inode in the filestreams cache. - * This can and will cause log traffic as inodes go inactive - * here. 
- */ - xfs_filestream_unmount(mp); - - XFS_bflush(mp->m_ddev_targp); - - xfs_unmountfs(mp); - goto out_free_sb; -} - -STATIC struct dentry * -xfs_fs_mount( - struct file_system_type *fs_type, - int flags, - const char *dev_name, - void *data) -{ - return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); -} - -static int -xfs_fs_nr_cached_objects( - struct super_block *sb) -{ - return xfs_reclaim_inodes_count(XFS_M(sb)); -} - -static void -xfs_fs_free_cached_objects( - struct super_block *sb, - int nr_to_scan) -{ - xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); -} - -static const struct super_operations xfs_super_operations = { - .alloc_inode = xfs_fs_alloc_inode, - .destroy_inode = xfs_fs_destroy_inode, - .dirty_inode = xfs_fs_dirty_inode, - .write_inode = xfs_fs_write_inode, - .evict_inode = xfs_fs_evict_inode, - .put_super = xfs_fs_put_super, - .sync_fs = xfs_fs_sync_fs, - .freeze_fs = xfs_fs_freeze, - .unfreeze_fs = xfs_fs_unfreeze, - .statfs = xfs_fs_statfs, - .remount_fs = xfs_fs_remount, - .show_options = xfs_fs_show_options, - .nr_cached_objects = xfs_fs_nr_cached_objects, - .free_cached_objects = xfs_fs_free_cached_objects, -}; - -static struct file_system_type xfs_fs_type = { - .owner = THIS_MODULE, - .name = "xfs", - .mount = xfs_fs_mount, - .kill_sb = kill_block_super, - .fs_flags = FS_REQUIRES_DEV, -}; - -STATIC int __init -xfs_init_zones(void) -{ - - xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); - if (!xfs_ioend_zone) - goto out; - - xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, - xfs_ioend_zone); - if (!xfs_ioend_pool) - goto out_destroy_ioend_zone; - - xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), - "xfs_log_ticket"); - if (!xfs_log_ticket_zone) - goto out_destroy_ioend_pool; - - xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), - "xfs_bmap_free_item"); - if (!xfs_bmap_free_item_zone) - goto out_destroy_log_ticket_zone; - - xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), - "xfs_btree_cur"); - if (!xfs_btree_cur_zone) - goto out_destroy_bmap_free_item_zone; - - xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), - "xfs_da_state"); - if (!xfs_da_state_zone) - goto out_destroy_btree_cur_zone; - - xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); - if (!xfs_dabuf_zone) - goto out_destroy_da_state_zone; - - xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); - if (!xfs_ifork_zone) - goto out_destroy_dabuf_zone; - - xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); - if (!xfs_trans_zone) - goto out_destroy_ifork_zone; - - xfs_log_item_desc_zone = - kmem_zone_init(sizeof(struct xfs_log_item_desc), - "xfs_log_item_desc"); - if (!xfs_log_item_desc_zone) - goto out_destroy_trans_zone; - - /* - * The size of the zone allocated buf log item is the maximum - * size possible under XFS. This wastes a little bit of memory, - * but it is much faster. 
- */ - xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) + - (((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / - NBWORD) * sizeof(int))), "xfs_buf_item"); - if (!xfs_buf_item_zone) - goto out_destroy_log_item_desc_zone; - - xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) + - ((XFS_EFD_MAX_FAST_EXTENTS - 1) * - sizeof(xfs_extent_t))), "xfs_efd_item"); - if (!xfs_efd_zone) - goto out_destroy_buf_item_zone; - - xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) + - ((XFS_EFI_MAX_FAST_EXTENTS - 1) * - sizeof(xfs_extent_t))), "xfs_efi_item"); - if (!xfs_efi_zone) - goto out_destroy_efd_zone; - - xfs_inode_zone = - kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD, - xfs_fs_inode_init_once); - if (!xfs_inode_zone) - goto out_destroy_efi_zone; - - xfs_ili_zone = - kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili", - KM_ZONE_SPREAD, NULL); - if (!xfs_ili_zone) - goto out_destroy_inode_zone; - - return 0; - - out_destroy_inode_zone: - kmem_zone_destroy(xfs_inode_zone); - out_destroy_efi_zone: - kmem_zone_destroy(xfs_efi_zone); - out_destroy_efd_zone: - kmem_zone_destroy(xfs_efd_zone); - out_destroy_buf_item_zone: - kmem_zone_destroy(xfs_buf_item_zone); - out_destroy_log_item_desc_zone: - kmem_zone_destroy(xfs_log_item_desc_zone); - out_destroy_trans_zone: - kmem_zone_destroy(xfs_trans_zone); - out_destroy_ifork_zone: - kmem_zone_destroy(xfs_ifork_zone); - out_destroy_dabuf_zone: - kmem_zone_destroy(xfs_dabuf_zone); - out_destroy_da_state_zone: - kmem_zone_destroy(xfs_da_state_zone); - out_destroy_btree_cur_zone: - kmem_zone_destroy(xfs_btree_cur_zone); - out_destroy_bmap_free_item_zone: - kmem_zone_destroy(xfs_bmap_free_item_zone); - out_destroy_log_ticket_zone: - kmem_zone_destroy(xfs_log_ticket_zone); - out_destroy_ioend_pool: - mempool_destroy(xfs_ioend_pool); - out_destroy_ioend_zone: - kmem_zone_destroy(xfs_ioend_zone); - out: - return -ENOMEM; -} - -STATIC void -xfs_destroy_zones(void) -{ - kmem_zone_destroy(xfs_ili_zone); - kmem_zone_destroy(xfs_inode_zone); - kmem_zone_destroy(xfs_efi_zone); - kmem_zone_destroy(xfs_efd_zone); - kmem_zone_destroy(xfs_buf_item_zone); - kmem_zone_destroy(xfs_log_item_desc_zone); - kmem_zone_destroy(xfs_trans_zone); - kmem_zone_destroy(xfs_ifork_zone); - kmem_zone_destroy(xfs_dabuf_zone); - kmem_zone_destroy(xfs_da_state_zone); - kmem_zone_destroy(xfs_btree_cur_zone); - kmem_zone_destroy(xfs_bmap_free_item_zone); - kmem_zone_destroy(xfs_log_ticket_zone); - mempool_destroy(xfs_ioend_pool); - kmem_zone_destroy(xfs_ioend_zone); - -} - -STATIC int __init -xfs_init_workqueues(void) -{ - /* - * max_active is set to 8 to give enough concurrency to allow - * multiple work operations on each CPU to run. This allows multiple - * filesystems to be running sync work concurrently, and scales with - * the number of CPUs in the system.
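xfs_init_zones() above (like xfs_fs_fill_super() earlier) uses the kernel's conventional goto unwind ladder: each setup step that succeeds gains a matching teardown label, and error paths fall through the destructors in exact reverse order of setup. The same shape in miniature, with hypothetical resources a/b/c standing in for the zones:

#include <errno.h>

/* hypothetical resources standing in for the zones above */
static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void destroy_a(void) { }
static void destroy_b(void) { }

static int init_all(void)
{
	if (init_a())
		goto out;
	if (init_b())
		goto out_destroy_a;
	if (init_c())
		goto out_destroy_b;
	return 0;

 out_destroy_b:			/* tear down in reverse order of setup */
	destroy_b();
 out_destroy_a:
	destroy_a();
 out:
	return -ENOMEM;
}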
- */ - xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); - if (!xfs_syncd_wq) - goto out; - - xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8); - if (!xfs_ail_wq) - goto out_destroy_syncd; - - return 0; - -out_destroy_syncd: - destroy_workqueue(xfs_syncd_wq); -out: - return -ENOMEM; -} - -STATIC void -xfs_destroy_workqueues(void) -{ - destroy_workqueue(xfs_ail_wq); - destroy_workqueue(xfs_syncd_wq); -} - -STATIC int __init -init_xfs_fs(void) -{ - int error; - - printk(KERN_INFO XFS_VERSION_STRING " with " - XFS_BUILD_OPTIONS " enabled\n"); - - xfs_ioend_init(); - xfs_dir_startup(); - - error = xfs_init_zones(); - if (error) - goto out; - - error = xfs_init_workqueues(); - if (error) - goto out_destroy_zones; - - error = xfs_mru_cache_init(); - if (error) - goto out_destroy_wq; - - error = xfs_filestream_init(); - if (error) - goto out_mru_cache_uninit; - - error = xfs_buf_init(); - if (error) - goto out_filestream_uninit; - - error = xfs_init_procfs(); - if (error) - goto out_buf_terminate; - - error = xfs_sysctl_register(); - if (error) - goto out_cleanup_procfs; - - vfs_initquota(); - - error = register_filesystem(&xfs_fs_type); - if (error) - goto out_sysctl_unregister; - return 0; - - out_sysctl_unregister: - xfs_sysctl_unregister(); - out_cleanup_procfs: - xfs_cleanup_procfs(); - out_buf_terminate: - xfs_buf_terminate(); - out_filestream_uninit: - xfs_filestream_uninit(); - out_mru_cache_uninit: - xfs_mru_cache_uninit(); - out_destroy_wq: - xfs_destroy_workqueues(); - out_destroy_zones: - xfs_destroy_zones(); - out: - return error; -} - -STATIC void __exit -exit_xfs_fs(void) -{ - vfs_exitquota(); - unregister_filesystem(&xfs_fs_type); - xfs_sysctl_unregister(); - xfs_cleanup_procfs(); - xfs_buf_terminate(); - xfs_filestream_uninit(); - xfs_mru_cache_uninit(); - xfs_destroy_workqueues(); - xfs_destroy_zones(); -} - -module_init(init_xfs_fs); -module_exit(exit_xfs_fs); - -MODULE_AUTHOR("Silicon Graphics, Inc."); -MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); -MODULE_LICENSE("GPL"); diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h deleted file mode 100644 index 50a3266..0000000 --- a/fs/xfs/linux-2.6/xfs_super.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SUPER_H__ -#define __XFS_SUPER_H__ - -#include - -#ifdef CONFIG_XFS_QUOTA -extern void xfs_qm_init(void); -extern void xfs_qm_exit(void); -# define vfs_initquota() xfs_qm_init() -# define vfs_exitquota() xfs_qm_exit() -#else -# define vfs_initquota() do { } while (0) -# define vfs_exitquota() do { } while (0) -#endif - -#ifdef CONFIG_XFS_POSIX_ACL -# define XFS_ACL_STRING "ACLs, " -# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL) -#else -# define XFS_ACL_STRING -# define set_posix_acl_flag(sb) do { } while (0) -#endif - -#define XFS_SECURITY_STRING "security attributes, " - -#ifdef CONFIG_XFS_RT -# define XFS_REALTIME_STRING "realtime, " -#else -# define XFS_REALTIME_STRING -#endif - -#if XFS_BIG_BLKNOS -# if XFS_BIG_INUMS -# define XFS_BIGFS_STRING "large block/inode numbers, " -# else -# define XFS_BIGFS_STRING "large block numbers, " -# endif -#else -# define XFS_BIGFS_STRING -#endif - -#ifdef DEBUG -# define XFS_DBG_STRING "debug" -#else -# define XFS_DBG_STRING "no debug" -#endif - -#define XFS_VERSION_STRING "SGI XFS" -#define XFS_BUILD_OPTIONS XFS_ACL_STRING \ - XFS_SECURITY_STRING \ - XFS_REALTIME_STRING \ - XFS_BIGFS_STRING \ - XFS_DBG_STRING /* DBG must be last */ - -struct xfs_inode; -struct xfs_mount; -struct xfs_buftarg; -struct block_device; - -extern __uint64_t xfs_max_file_offset(unsigned int); - -extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); - -extern const struct export_operations xfs_export_operations; -extern const struct xattr_handler *xfs_xattr_handlers[]; -extern const struct quotactl_ops xfs_quotactl_operations; - -#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) - -#endif /* __XFS_SUPER_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c deleted file mode 100644 index 4604f90..0000000 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ /dev/null @@ -1,1065 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_types.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_trans_priv.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_dinode.h" -#include "xfs_error.h" -#include "xfs_filestream.h" -#include "xfs_vnodeops.h" -#include "xfs_inode_item.h" -#include "xfs_quota.h" -#include "xfs_trace.h" -#include "xfs_fsops.h" - -#include -#include - -struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ - -/* - * The inode lookup is done in batches to keep the amount of lock traffic and - * radix tree lookups to a minimum. 
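The batching described here has a simple general shape, distilled in the self-contained user-space sketch below: fetch up to a fixed number of entries starting from a cursor, process them with the lookup lock dropped, then continue from the last index seen. A flat array stands in for the per-AG radix tree, and lookup_batch() is an illustrative stand-in for radix_tree_gang_lookup().

#include <stdio.h>

#define LOOKUP_BATCH 32
#define NITEMS 100

static int items[NITEMS];

/* copy up to max entries starting at first_index into batch */
static int lookup_batch(int *batch, int first_index, int max)
{
	int nr = 0;
	while (first_index < NITEMS && nr < max)
		batch[nr++] = items[first_index++];
	return nr;
}

int main(void)
{
	int batch[LOOKUP_BATCH];
	int first_index = 0, nr, i;

	for (i = 0; i < NITEMS; i++)
		items[i] = i;

	do {
		/* in the kernel this lookup runs under rcu_read_lock() */
		nr = lookup_batch(batch, first_index, LOOKUP_BATCH);
		first_index += nr;	/* advance the cursor */
		/* process outside the lookup lock */
		for (i = 0; i < nr; i++)
			printf("%d\n", batch[i]);
	} while (nr == LOOKUP_BATCH);

	return 0;
}

The fixed batch size bounds stack usage while still amortising the cost of taking the lookup lock, which is the trade-off the comment goes on to describe.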
The batch size is a trade-off between - * lookup reduction and stack usage. This is in the reclaim path, so we can't - * be too greedy. - */ -#define XFS_LOOKUP_BATCH 32 - -STATIC int -xfs_inode_ag_walk_grab( - struct xfs_inode *ip) -{ - struct inode *inode = VFS_I(ip); - - ASSERT(rcu_read_lock_held()); - - /* - * check for stale RCU freed inode - * - * If the inode has been reallocated, it doesn't matter if it's not in - * the AG we are walking - we are walking for writeback, so if it - * passes all the "valid inode" checks and is dirty, then we'll write - * it back anyway. If it has been reallocated and is still being - * initialised, the XFS_INEW check below will catch it. - */ - spin_lock(&ip->i_flags_lock); - if (!ip->i_ino) - goto out_unlock_noent; - - /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ - if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) - goto out_unlock_noent; - spin_unlock(&ip->i_flags_lock); - - /* nothing to sync during shutdown */ - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return EFSCORRUPTED; - - /* If we can't grab the inode, it must be on its way to reclaim. */ - if (!igrab(inode)) - return ENOENT; - - if (is_bad_inode(inode)) { - IRELE(ip); - return ENOENT; - } - - /* inode is valid */ - return 0; - -out_unlock_noent: - spin_unlock(&ip->i_flags_lock); - return ENOENT; -} - -STATIC int -xfs_inode_ag_walk( - struct xfs_mount *mp, - struct xfs_perag *pag, - int (*execute)(struct xfs_inode *ip, - struct xfs_perag *pag, int flags), - int flags) -{ - uint32_t first_index; - int last_error = 0; - int skipped; - int done; - int nr_found; - -restart: - done = 0; - skipped = 0; - first_index = 0; - nr_found = 0; - do { - struct xfs_inode *batch[XFS_LOOKUP_BATCH]; - int error = 0; - int i; - - rcu_read_lock(); - nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, - (void **)batch, first_index, - XFS_LOOKUP_BATCH); - if (!nr_found) { - rcu_read_unlock(); - break; - } - - /* - * Grab the inodes before we drop the lock. If we found - * nothing, nr == 0 and the loop will be skipped. - */ - for (i = 0; i < nr_found; i++) { - struct xfs_inode *ip = batch[i]; - - if (done || xfs_inode_ag_walk_grab(ip)) - batch[i] = NULL; - - /* - * Update the index for the next lookup. Catch - * overflows into the next AG range which can occur if - * we have inodes in the last block of the AG and we - * are currently pointing to the last inode. - * - * Because we may see inodes that are from the wrong AG - * due to RCU freeing and reallocation, only update the - * index if it lies in this AG. It was a race that led - * us to see this inode, so another lookup from the - * same index will not find it again. - */ - if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) - continue; - first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); - if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) - done = 1; - } - - /* unlock now we've grabbed the inodes. */ - rcu_read_unlock(); - - for (i = 0; i < nr_found; i++) { - if (!batch[i]) - continue; - error = execute(batch[i], pag, flags); - IRELE(batch[i]); - if (error == EAGAIN) { - skipped++; - continue; - } - if (error && last_error != EFSCORRUPTED) - last_error = error; - } - - /* bail out if the filesystem is corrupted.
*/ - if (error == EFSCORRUPTED) - break; - - cond_resched(); - - } while (nr_found && !done); - - if (skipped) { - delay(1); - goto restart; - } - return last_error; -} - -int -xfs_inode_ag_iterator( - struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, - struct xfs_perag *pag, int flags), - int flags) -{ - struct xfs_perag *pag; - int error = 0; - int last_error = 0; - xfs_agnumber_t ag; - - ag = 0; - while ((pag = xfs_perag_get(mp, ag))) { - ag = pag->pag_agno + 1; - error = xfs_inode_ag_walk(mp, pag, execute, flags); - xfs_perag_put(pag); - if (error) { - last_error = error; - if (error == EFSCORRUPTED) - break; - } - } - return XFS_ERROR(last_error); -} - -STATIC int -xfs_sync_inode_data( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - struct inode *inode = VFS_I(ip); - struct address_space *mapping = inode->i_mapping; - int error = 0; - - if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) - goto out_wait; - - if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { - if (flags & SYNC_TRYLOCK) - goto out_wait; - xfs_ilock(ip, XFS_IOLOCK_SHARED); - } - - error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ? - 0 : XBF_ASYNC, FI_NONE); - xfs_iunlock(ip, XFS_IOLOCK_SHARED); - - out_wait: - if (flags & SYNC_WAIT) - xfs_ioend_wait(ip); - return error; -} - -STATIC int -xfs_sync_inode_attr( - struct xfs_inode *ip, - struct xfs_perag *pag, - int flags) -{ - int error = 0; - - xfs_ilock(ip, XFS_ILOCK_SHARED); - if (xfs_inode_clean(ip)) - goto out_unlock; - if (!xfs_iflock_nowait(ip)) { - if (!(flags & SYNC_WAIT)) - goto out_unlock; - xfs_iflock(ip); - } - - if (xfs_inode_clean(ip)) { - xfs_ifunlock(ip); - goto out_unlock; - } - - error = xfs_iflush(ip, flags); - - /* - * We don't want to try again on non-blocking flushes that can't run - * again immediately. If an inode really must be written, then that's - * what the SYNC_WAIT flag is for. - */ - if (error == EAGAIN) { - ASSERT(!(flags & SYNC_WAIT)); - error = 0; - } - - out_unlock: - xfs_iunlock(ip, XFS_ILOCK_SHARED); - return error; -} - -/* - * Write out pagecache data for the whole filesystem. - */ -STATIC int -xfs_sync_data( - struct xfs_mount *mp, - int flags) -{ - int error; - - ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); - - error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags); - if (error) - return XFS_ERROR(error); - - xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0); - return 0; -} - -/* - * Write out inode metadata (attributes) for the whole filesystem. - */ -STATIC int -xfs_sync_attr( - struct xfs_mount *mp, - int flags) -{ - ASSERT((flags & ~SYNC_WAIT) == 0); - - return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags); -} - -STATIC int -xfs_sync_fsdata( - struct xfs_mount *mp) -{ - struct xfs_buf *bp; - - /* - * If the buffer is pinned then push on the log so we won't get stuck - * waiting in the write for someone, maybe ourselves, to flush the log. - * - * Even though we just pushed the log above, we did not have the - * superblock buffer locked at that point so it can become pinned in - * between there and here. - */ - bp = xfs_getsb(mp, 0); - if (xfs_buf_ispinned(bp)) - xfs_log_force(mp, 0); - - return xfs_bwrite(mp, bp); -} - -/* - * When remounting a filesystem read-only or freezing the filesystem, we have - * two phases to execute. This first phase is syncing the data before we - * quiesce the filesystem, and the second is flushing all the inodes out after - * we've waited for all the transactions created by the first phase to - * complete. 
The second phase ensures that the inodes are written to their - * location on disk rather than just existing in transactions in the log. This - * means after a quiesce there is no log replay required to write the inodes to - * disk (this is the main difference between a sync and a quiesce). - */ -/* - * First stage of freeze - no writers will make progress now that we are here, - * so we flush delwri and delalloc buffers here, then wait for all I/O to - * complete. Data is frozen at that point. Metadata is not frozen, - * transactions can still occur here so don't bother flushing the buftarg - * because it'll just get dirty again. - */ -int -xfs_quiesce_data( - struct xfs_mount *mp) -{ - int error, error2 = 0; - - xfs_qm_sync(mp, SYNC_TRYLOCK); - xfs_qm_sync(mp, SYNC_WAIT); - - /* force out the newly dirtied log buffers */ - xfs_log_force(mp, XFS_LOG_SYNC); - - /* write superblock and hoover up shutdown errors */ - error = xfs_sync_fsdata(mp); - - /* make sure all delwri buffers are written out */ - xfs_flush_buftarg(mp->m_ddev_targp, 1); - - /* mark the log as covered if needed */ - if (xfs_log_need_covered(mp)) - error2 = xfs_fs_log_dummy(mp); - - /* flush data-only devices */ - if (mp->m_rtdev_targp) - XFS_bflush(mp->m_rtdev_targp); - - return error ? error : error2; -} - -STATIC void -xfs_quiesce_fs( - struct xfs_mount *mp) -{ - int count = 0, pincount; - - xfs_reclaim_inodes(mp, 0); - xfs_flush_buftarg(mp->m_ddev_targp, 0); - - /* - * This loop must run at least twice. The first instance of the loop - * will flush most meta data but that will generate more meta data - * (typically directory updates), which then must be flushed and - * logged before we can write the unmount record. We also do sync - * reclaim of inodes to catch any that the above delwri flush skipped. - */ - do { - xfs_reclaim_inodes(mp, SYNC_WAIT); - xfs_sync_attr(mp, SYNC_WAIT); - pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); - if (!pincount) { - delay(50); - count++; - } - } while (count < 2); -} - -/* - * Second stage of a quiesce. The data is already synced, now we have to take - * care of the metadata. New transactions are already blocked, so we need to - * wait for any remaining transactions to drain out before proceeding. - */ -void -xfs_quiesce_attr( - struct xfs_mount *mp) -{ - int error = 0; - - /* wait for all modifications to complete */ - while (atomic_read(&mp->m_active_trans) > 0) - delay(100); - - /* flush inodes and push all remaining buffers out to disk */ - xfs_quiesce_fs(mp); - - /* - * Just warn here till VFS can correctly support - * read-only remount without racing. - */ - WARN_ON(atomic_read(&mp->m_active_trans) != 0); - - /* Push the superblock and write an unmount record */ - error = xfs_log_sbcount(mp); - if (error) - xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. " - "Frozen image may not be consistent."); - xfs_log_unmount_write(mp); - xfs_unmountfs_writesb(mp); -} - -static void -xfs_syncd_queue_sync( - struct xfs_mount *mp) -{ - queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work, - msecs_to_jiffies(xfs_syncd_centisecs * 10)); -} - -/* - * Every sync period we need to unpin all items, reclaim inodes and sync - * disk quotas. We might need to cover the log to indicate that the - * filesystem is idle and not frozen.
- */ -STATIC void -xfs_sync_worker( - struct work_struct *work) -{ - struct xfs_mount *mp = container_of(to_delayed_work(work), - struct xfs_mount, m_sync_work); - int error; - - if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { - /* dgc: errors ignored here */ - if (mp->m_super->s_frozen == SB_UNFROZEN && - xfs_log_need_covered(mp)) - error = xfs_fs_log_dummy(mp); - else - xfs_log_force(mp, 0); - error = xfs_qm_sync(mp, SYNC_TRYLOCK); - - /* start pushing all the metadata that is currently dirty */ - xfs_ail_push_all(mp->m_ail); - } - - /* queue us up again */ - xfs_syncd_queue_sync(mp); -} - -/* - * Queue a new inode reclaim pass if there are reclaimable inodes and there - * isn't a reclaim pass already in progress. By default it runs every 5s based - * on the xfs syncd work default of 30s. Perhaps this should have its own - * tunable, but that can be done if this method proves to be ineffective or too - * aggressive. - */ -static void -xfs_syncd_queue_reclaim( - struct xfs_mount *mp) -{ - - /* - * We can have inodes enter reclaim after we've shut down the syncd - * workqueue during unmount, so don't allow reclaim work to be queued - * during unmount. - */ - if (!(mp->m_super->s_flags & MS_ACTIVE)) - return; - - rcu_read_lock(); - if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { - queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work, - msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); - } - rcu_read_unlock(); -} - -/* - * This is a fast pass over the inode cache to try to get reclaim moving on as - * many inodes as possible in a short period of time. It kicks itself every few - * seconds, as well as being kicked by the inode cache shrinker when memory - * goes low. It scans as quickly as possible avoiding locked inodes or those - * already being flushed, and once done schedules a future pass. - */ -STATIC void -xfs_reclaim_worker( - struct work_struct *work) -{ - struct xfs_mount *mp = container_of(to_delayed_work(work), - struct xfs_mount, m_reclaim_work); - - xfs_reclaim_inodes(mp, SYNC_TRYLOCK); - xfs_syncd_queue_reclaim(mp); -} - -/* - * Flush delayed allocate data, attempting to free up reserved space - * from existing allocations. At this point a new allocation attempt - * has failed with ENOSPC and we are in the process of scratching our - * heads, looking about for more room. - * - * Queue a new data flush if there isn't one already in progress and - * wait for completion of the flush. This means that we only ever have one - * inode flush in progress no matter how many ENOSPC events are occurring and - * so will prevent the system from bogging down due to every concurrent - * ENOSPC event scanning all the active inodes in the system for writeback.
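The queue_work()/flush_work_sync() pair used by xfs_flush_inodes() below gives "at most one flush in flight, and every caller waits for a flush to complete": a work item that is already queued is not queued again, so concurrent ENOSPC callers coalesce. Roughly the same semantics in a user-space sketch (names and locking are illustrative only):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static bool flushing;
static unsigned long generation;

static void do_flush(void) { /* ... write back dirty data ... */ }

static void flush_and_wait(void)
{
	pthread_mutex_lock(&lock);
	unsigned long gen = generation;
	if (!flushing) {
		flushing = true;
		pthread_mutex_unlock(&lock);
		do_flush();		/* only one flusher at a time */
		pthread_mutex_lock(&lock);
		flushing = false;
		generation++;
		pthread_cond_broadcast(&done);
	} else {
		/* coalesce: wait for the in-flight flush to finish */
		while (flushing && gen == generation)
			pthread_cond_wait(&done, &lock);
	}
	pthread_mutex_unlock(&lock);
}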
- */ -void -xfs_flush_inodes( - struct xfs_inode *ip) -{ - struct xfs_mount *mp = ip->i_mount; - - queue_work(xfs_syncd_wq, &mp->m_flush_work); - flush_work_sync(&mp->m_flush_work); -} - -STATIC void -xfs_flush_worker( - struct work_struct *work) -{ - struct xfs_mount *mp = container_of(work, - struct xfs_mount, m_flush_work); - - xfs_sync_data(mp, SYNC_TRYLOCK); - xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT); -} - -int -xfs_syncd_init( - struct xfs_mount *mp) -{ - INIT_WORK(&mp->m_flush_work, xfs_flush_worker); - INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker); - INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); - - xfs_syncd_queue_sync(mp); - xfs_syncd_queue_reclaim(mp); - - return 0; -} - -void -xfs_syncd_stop( - struct xfs_mount *mp) -{ - cancel_delayed_work_sync(&mp->m_sync_work); - cancel_delayed_work_sync(&mp->m_reclaim_work); - cancel_work_sync(&mp->m_flush_work); -} - -void -__xfs_inode_set_reclaim_tag( - struct xfs_perag *pag, - struct xfs_inode *ip) -{ - radix_tree_tag_set(&pag->pag_ici_root, - XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), - XFS_ICI_RECLAIM_TAG); - - if (!pag->pag_ici_reclaimable) { - /* propagate the reclaim tag up into the perag radix tree */ - spin_lock(&ip->i_mount->m_perag_lock); - radix_tree_tag_set(&ip->i_mount->m_perag_tree, - XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), - XFS_ICI_RECLAIM_TAG); - spin_unlock(&ip->i_mount->m_perag_lock); - - /* schedule periodic background inode reclaim */ - xfs_syncd_queue_reclaim(ip->i_mount); - - trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno, - -1, _RET_IP_); - } - pag->pag_ici_reclaimable++; -} - -/* - * We set the inode flag atomically with the radix tree tag. - * Once we get tag lookups on the radix tree, this inode flag - * can go away. - */ -void -xfs_inode_set_reclaim_tag( - xfs_inode_t *ip) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_perag *pag; - - pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); - spin_lock(&pag->pag_ici_lock); - spin_lock(&ip->i_flags_lock); - __xfs_inode_set_reclaim_tag(pag, ip); - __xfs_iflags_set(ip, XFS_IRECLAIMABLE); - spin_unlock(&ip->i_flags_lock); - spin_unlock(&pag->pag_ici_lock); - xfs_perag_put(pag); -} - -STATIC void -__xfs_inode_clear_reclaim( - xfs_perag_t *pag, - xfs_inode_t *ip) -{ - pag->pag_ici_reclaimable--; - if (!pag->pag_ici_reclaimable) { - /* clear the reclaim tag from the perag radix tree */ - spin_lock(&ip->i_mount->m_perag_lock); - radix_tree_tag_clear(&ip->i_mount->m_perag_tree, - XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), - XFS_ICI_RECLAIM_TAG); - spin_unlock(&ip->i_mount->m_perag_lock); - trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno, - -1, _RET_IP_); - } -} - -void -__xfs_inode_clear_reclaim_tag( - xfs_mount_t *mp, - xfs_perag_t *pag, - xfs_inode_t *ip) -{ - radix_tree_tag_clear(&pag->pag_ici_root, - XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); - __xfs_inode_clear_reclaim(pag, ip); -} - -/* - * Grab the inode for reclaim exclusively. - * Return 0 if we grabbed it, non-zero otherwise. - */ -STATIC int -xfs_reclaim_inode_grab( - struct xfs_inode *ip, - int flags) -{ - ASSERT(rcu_read_lock_held()); - - /* quick check for stale RCU freed inode */ - if (!ip->i_ino) - return 1; - - /* - * do some unlocked checks first to avoid unnecessary lock traffic. - * The first is a flush lock check, the second is an already-in-reclaim - * check. Only do these checks if we are not going to block on locks.
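The unlocked checks here are purely an optimisation; as the code that follows shows, the same conditions are re-tested under i_flags_lock before XFS_IRECLAIM is set. The generic double-check shape, as a user-space sketch with a hypothetical flag:

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t flags_lock;
	bool reclaiming;
};

static int grab_for_reclaim(struct obj *o)
{
	if (o->reclaiming)		/* racy peek: may be stale ... */
		return 1;		/* ... but a wrong answer only costs a retry */

	pthread_mutex_lock(&o->flags_lock);
	if (o->reclaiming) {		/* authoritative re-check */
		pthread_mutex_unlock(&o->flags_lock);
		return 1;
	}
	o->reclaiming = true;		/* we now own reclaim of this object */
	pthread_mutex_unlock(&o->flags_lock);
	return 0;
}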
- */ - if ((flags & SYNC_TRYLOCK) && - (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) { - return 1; - } - - /* - * The radix tree lock here protects a thread in xfs_iget from racing - * with us starting reclaim on the inode. Once we have the - * XFS_IRECLAIM flag set it will not touch us. - * - * Due to RCU lookup, we may find inodes that have been freed and only - * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that - * aren't candidates for reclaim at all, so we must check the - * XFS_IRECLAIMABLE is set first before proceeding to reclaim. - */ - spin_lock(&ip->i_flags_lock); - if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || - __xfs_iflags_test(ip, XFS_IRECLAIM)) { - /* not a reclaim candidate. */ - spin_unlock(&ip->i_flags_lock); - return 1; - } - __xfs_iflags_set(ip, XFS_IRECLAIM); - spin_unlock(&ip->i_flags_lock); - return 0; -} - -/* - * Inodes in different states need to be treated differently, and the return - * value of xfs_iflush is not sufficient to get this right. The following table - * lists the inode states and the reclaim actions necessary for non-blocking - * reclaim: - * - * - * inode state iflush ret required action - * --------------- ---------- --------------- - * bad - reclaim - * shutdown EIO unpin and reclaim - * clean, unpinned 0 reclaim - * stale, unpinned 0 reclaim - * clean, pinned(*) 0 requeue - * stale, pinned EAGAIN requeue - * dirty, delwri ok 0 requeue - * dirty, delwri blocked EAGAIN requeue - * dirty, sync flush 0 reclaim - * - * (*) dgc: I don't think the clean, pinned state is possible but it gets - * handled anyway given the order of checks implemented. - * - * As can be seen from the table, the return value of xfs_iflush() is not - * sufficient to correctly decide the reclaim action here. The checks in - * xfs_iflush() might look like duplicates, but they are not. - * - * Also, because we get the flush lock first, we know that any inode that has - * been flushed delwri has had the flush completed by the time we check that - * the inode is clean. The clean inode check needs to be done before flushing - * the inode delwri otherwise we would loop forever requeuing clean inodes as - * we cannot tell apart a successful delwri flush and a clean inode from the - * return value of xfs_iflush(). - * - * Note that because the inode is flushed delayed write by background - * writeback, the flush lock may already be held here and waiting on it can - * result in very long latencies. Hence for sync reclaims, where we wait on the - * flush lock, the caller should push out delayed write inodes first before - * trying to reclaim them to minimise the amount of time spent waiting. For - * background relaim, we just requeue the inode for the next pass. 
- * - * Hence the order of actions after gaining the locks should be: - * bad => reclaim - * shutdown => unpin and reclaim - * pinned, delwri => requeue - * pinned, sync => unpin - * stale => reclaim - * clean => reclaim - * dirty, delwri => flush and requeue - * dirty, sync => flush, wait and reclaim - */ -STATIC int -xfs_reclaim_inode( - struct xfs_inode *ip, - struct xfs_perag *pag, - int sync_mode) -{ - int error; - -restart: - error = 0; - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (!xfs_iflock_nowait(ip)) { - if (!(sync_mode & SYNC_WAIT)) - goto out; - xfs_iflock(ip); - } - - if (is_bad_inode(VFS_I(ip))) - goto reclaim; - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { - xfs_iunpin_wait(ip); - goto reclaim; - } - if (xfs_ipincount(ip)) { - if (!(sync_mode & SYNC_WAIT)) { - xfs_ifunlock(ip); - goto out; - } - xfs_iunpin_wait(ip); - } - if (xfs_iflags_test(ip, XFS_ISTALE)) - goto reclaim; - if (xfs_inode_clean(ip)) - goto reclaim; - - /* - * Now we have an inode that needs flushing. - * - * We do a nonblocking flush here even if we are doing a SYNC_WAIT - * reclaim as we can deadlock with inode cluster removal. - * xfs_ifree_cluster() can lock the inode buffer before it locks the - * ip->i_lock, and we are doing the exact opposite here. As a result, - * doing a blocking xfs_itobp() to get the cluster buffer will result - * in an ABBA deadlock with xfs_ifree_cluster(). - * - * As xfs_ifree_cluster() must gather all inodes that are active in the - * cache to mark them stale, if we hit this case we don't actually want - * to do IO here - we want the inode marked stale so we can simply - * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush, - * just unlock the inode, back off and try again. Hopefully the next - * pass through will see the stale flag set on the inode. - */ - error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode); - if (sync_mode & SYNC_WAIT) { - if (error == EAGAIN) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* backoff longer than in xfs_ifree_cluster */ - delay(2); - goto restart; - } - xfs_iflock(ip); - goto reclaim; - } - - /* - * When we have to flush an inode but don't have SYNC_WAIT set, we - * flush the inode out using a delwri buffer and wait for the next - * call into reclaim to find it in a clean state instead of waiting for - * it now. We also don't return errors here - if the error is transient - * then the next reclaim pass will flush the inode, and if the error - * is permanent then the next sync reclaim will reclaim the inode and - * pass on the error. - */ - if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { - xfs_warn(ip->i_mount, - "inode 0x%llx background reclaim flush failed with %d", - (long long)ip->i_ino, error); - } -out: - xfs_iflags_clear(ip, XFS_IRECLAIM); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* - * We could return EAGAIN here to make reclaim rescan the inode tree in - * a short while. However, this just burns CPU time scanning the tree - * waiting for IO to complete and xfssyncd never goes back to the idle - * state. Instead, return 0 to let the next scheduled background reclaim - * attempt to reclaim the inode again. - */ - return 0; - -reclaim: - xfs_ifunlock(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - - XFS_STATS_INC(xs_ig_reclaims); - /* - * Remove the inode from the per-AG radix tree. - * - * Because radix_tree_delete won't complain even if the item was never - * added to the tree, assert that it's been there before to catch - * problems with the inode lifetime early on.
- */ - spin_lock(&pag->pag_ici_lock); - if (!radix_tree_delete(&pag->pag_ici_root, - XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) - ASSERT(0); - __xfs_inode_clear_reclaim(pag, ip); - spin_unlock(&pag->pag_ici_lock); - - /* - * Here we do an (almost) spurious inode lock in order to coordinate - * with inode cache radix tree lookups. This is because the lookup - * can reference the inodes in the cache without taking references. - * - * We make that OK here by ensuring that we wait until the inode is - * unlocked after the lookup before we go ahead and free it. We get - * both the ilock and the iolock because the code may need to drop the - * ilock one but will still hold the iolock. - */ - xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - xfs_qm_dqdetach(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - - xfs_inode_free(ip); - return error; - -} - -/* - * Walk the AGs and reclaim the inodes in them. Even if the filesystem is - * corrupted, we still want to try to reclaim all the inodes. If we don't, - * then a shut down during filesystem unmount reclaim walk leak all the - * unreclaimed inodes. - */ -int -xfs_reclaim_inodes_ag( - struct xfs_mount *mp, - int flags, - int *nr_to_scan) -{ - struct xfs_perag *pag; - int error = 0; - int last_error = 0; - xfs_agnumber_t ag; - int trylock = flags & SYNC_TRYLOCK; - int skipped; - -restart: - ag = 0; - skipped = 0; - while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { - unsigned long first_index = 0; - int done = 0; - int nr_found = 0; - - ag = pag->pag_agno + 1; - - if (trylock) { - if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { - skipped++; - xfs_perag_put(pag); - continue; - } - first_index = pag->pag_ici_reclaim_cursor; - } else - mutex_lock(&pag->pag_ici_reclaim_lock); - - do { - struct xfs_inode *batch[XFS_LOOKUP_BATCH]; - int i; - - rcu_read_lock(); - nr_found = radix_tree_gang_lookup_tag( - &pag->pag_ici_root, - (void **)batch, first_index, - XFS_LOOKUP_BATCH, - XFS_ICI_RECLAIM_TAG); - if (!nr_found) { - done = 1; - rcu_read_unlock(); - break; - } - - /* - * Grab the inodes before we drop the lock. if we found - * nothing, nr == 0 and the loop will be skipped. - */ - for (i = 0; i < nr_found; i++) { - struct xfs_inode *ip = batch[i]; - - if (done || xfs_reclaim_inode_grab(ip, flags)) - batch[i] = NULL; - - /* - * Update the index for the next lookup. Catch - * overflows into the next AG range which can - * occur if we have inodes in the last block of - * the AG and we are currently pointing to the - * last inode. - * - * Because we may see inodes that are from the - * wrong AG due to RCU freeing and - * reallocation, only update the index if it - * lies in this AG. It was a race that lead us - * to see this inode, so another lookup from - * the same index will not find it again. - */ - if (XFS_INO_TO_AGNO(mp, ip->i_ino) != - pag->pag_agno) - continue; - first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); - if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) - done = 1; - } - - /* unlock now we've grabbed the inodes. 
*/ - rcu_read_unlock(); - - for (i = 0; i < nr_found; i++) { - if (!batch[i]) - continue; - error = xfs_reclaim_inode(batch[i], pag, flags); - if (error && last_error != EFSCORRUPTED) - last_error = error; - } - - *nr_to_scan -= XFS_LOOKUP_BATCH; - - cond_resched(); - - } while (nr_found && !done && *nr_to_scan > 0); - - if (trylock && !done) - pag->pag_ici_reclaim_cursor = first_index; - else - pag->pag_ici_reclaim_cursor = 0; - mutex_unlock(&pag->pag_ici_reclaim_lock); - xfs_perag_put(pag); - } - - /* - * If we skipped any AG, and we still have scan count remaining, do - * another pass, this time using blocking reclaim semantics (i.e. - * waiting on the reclaim locks and ignoring the reclaim cursors). This - * ensures that when we get more reclaimers than AGs we block rather - * than spin trying to execute reclaim. - */ - if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { - trylock = 0; - goto restart; - } - return XFS_ERROR(last_error); -} - -int -xfs_reclaim_inodes( - xfs_mount_t *mp, - int mode) -{ - int nr_to_scan = INT_MAX; - - return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); -} - -/* - * Scan a certain number of inodes for reclaim. - * - * When called we make sure that there is a background (fast) inode reclaim in - * progress, while we throttle the speed of reclaim by doing synchronous - * reclaim of inodes. That means if we come across dirty inodes, we wait for - * them to be cleaned, which we hope will not be very long due to the - * background walker having already kicked the IO off on those dirty inodes. - */ -void -xfs_reclaim_inodes_nr( - struct xfs_mount *mp, - int nr_to_scan) -{ - /* kick background reclaimer and push the AIL */ - xfs_syncd_queue_reclaim(mp); - xfs_ail_push_all(mp->m_ail); - - xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); -} - -/* - * Return the number of reclaimable inodes in the filesystem for - * the shrinker to determine how much to reclaim. - */ -int -xfs_reclaim_inodes_count( - struct xfs_mount *mp) -{ - struct xfs_perag *pag; - xfs_agnumber_t ag = 0; - int reclaimable = 0; - - while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { - ag = pag->pag_agno + 1; - reclaimable += pag->pag_ici_reclaimable; - xfs_perag_put(pag); - } - return reclaimable; -} - diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h deleted file mode 100644 index 941202e..0000000 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2000-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef XFS_SYNC_H -#define XFS_SYNC_H 1 - -struct xfs_mount; -struct xfs_perag; - -#define SYNC_WAIT 0x0001 /* wait for i/o to complete */ -#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ - -extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ - -int xfs_syncd_init(struct xfs_mount *mp); -void xfs_syncd_stop(struct xfs_mount *mp); - -int xfs_quiesce_data(struct xfs_mount *mp); -void xfs_quiesce_attr(struct xfs_mount *mp); - -void xfs_flush_inodes(struct xfs_inode *ip); - -int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); -int xfs_reclaim_inodes_count(struct xfs_mount *mp); -void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); - -void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); -void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); -void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, - struct xfs_inode *ip); - -int xfs_sync_inode_grab(struct xfs_inode *ip); -int xfs_inode_ag_iterator(struct xfs_mount *mp, - int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), - int flags); - -#endif diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c deleted file mode 100644 index ee2d2ad..0000000 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright (c) 2001-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include <linux/sysctl.h> -#include <linux/proc_fs.h> -#include "xfs_error.h" - -static struct ctl_table_header *xfs_table_header; - -#ifdef CONFIG_PROC_FS -STATIC int -xfs_stats_clear_proc_handler( - ctl_table *ctl, - int write, - void __user *buffer, - size_t *lenp, - loff_t *ppos) -{ - int c, ret, *valp = ctl->data; - __uint32_t vn_active; - - ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); - - if (!ret && write && *valp) { - xfs_notice(NULL, "Clearing xfsstats"); - for_each_possible_cpu(c) { - preempt_disable(); - /* save vn_active, it's a universal truth! 
*/ - vn_active = per_cpu(xfsstats, c).vn_active; - memset(&per_cpu(xfsstats, c), 0, - sizeof(struct xfsstats)); - per_cpu(xfsstats, c).vn_active = vn_active; - preempt_enable(); - } - xfs_stats_clear = 0; - } - - return ret; -} - -STATIC int -xfs_panic_mask_proc_handler( - ctl_table *ctl, - int write, - void __user *buffer, - size_t *lenp, - loff_t *ppos) -{ - int ret, *valp = ctl->data; - - ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); - if (!ret && write) { - xfs_panic_mask = *valp; -#ifdef DEBUG - xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES); -#endif - } - return ret; -} -#endif /* CONFIG_PROC_FS */ - -static ctl_table xfs_table[] = { - { - .procname = "irix_sgid_inherit", - .data = &xfs_params.sgid_inherit.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.sgid_inherit.min, - .extra2 = &xfs_params.sgid_inherit.max - }, - { - .procname = "irix_symlink_mode", - .data = &xfs_params.symlink_mode.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.symlink_mode.min, - .extra2 = &xfs_params.symlink_mode.max - }, - { - .procname = "panic_mask", - .data = &xfs_params.panic_mask.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = xfs_panic_mask_proc_handler, - .extra1 = &xfs_params.panic_mask.min, - .extra2 = &xfs_params.panic_mask.max - }, - - { - .procname = "error_level", - .data = &xfs_params.error_level.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.error_level.min, - .extra2 = &xfs_params.error_level.max - }, - { - .procname = "xfssyncd_centisecs", - .data = &xfs_params.syncd_timer.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.syncd_timer.min, - .extra2 = &xfs_params.syncd_timer.max - }, - { - .procname = "inherit_sync", - .data = &xfs_params.inherit_sync.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.inherit_sync.min, - .extra2 = &xfs_params.inherit_sync.max - }, - { - .procname = "inherit_nodump", - .data = &xfs_params.inherit_nodump.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.inherit_nodump.min, - .extra2 = &xfs_params.inherit_nodump.max - }, - { - .procname = "inherit_noatime", - .data = &xfs_params.inherit_noatim.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.inherit_noatim.min, - .extra2 = &xfs_params.inherit_noatim.max - }, - { - .procname = "xfsbufd_centisecs", - .data = &xfs_params.xfs_buf_timer.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.xfs_buf_timer.min, - .extra2 = &xfs_params.xfs_buf_timer.max - }, - { - .procname = "age_buffer_centisecs", - .data = &xfs_params.xfs_buf_age.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.xfs_buf_age.min, - .extra2 = &xfs_params.xfs_buf_age.max - }, - { - .procname = "inherit_nosymlinks", - .data = &xfs_params.inherit_nosym.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.inherit_nosym.min, - .extra2 = &xfs_params.inherit_nosym.max - }, - { - .procname = "rotorstep", - .data = &xfs_params.rotorstep.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - 
.extra1 = &xfs_params.rotorstep.min, - .extra2 = &xfs_params.rotorstep.max - }, - { - .procname = "inherit_nodefrag", - .data = &xfs_params.inherit_nodfrg.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.inherit_nodfrg.min, - .extra2 = &xfs_params.inherit_nodfrg.max - }, - { - .procname = "filestream_centisecs", - .data = &xfs_params.fstrm_timer.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &xfs_params.fstrm_timer.min, - .extra2 = &xfs_params.fstrm_timer.max, - }, - /* please keep this the last entry */ -#ifdef CONFIG_PROC_FS - { - .procname = "stats_clear", - .data = &xfs_params.stats_clear.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = xfs_stats_clear_proc_handler, - .extra1 = &xfs_params.stats_clear.min, - .extra2 = &xfs_params.stats_clear.max - }, -#endif /* CONFIG_PROC_FS */ - - {} -}; - -static ctl_table xfs_dir_table[] = { - { - .procname = "xfs", - .mode = 0555, - .child = xfs_table - }, - {} -}; - -static ctl_table xfs_root_table[] = { - { - .procname = "fs", - .mode = 0555, - .child = xfs_dir_table - }, - {} -}; - -int -xfs_sysctl_register(void) -{ - xfs_table_header = register_sysctl_table(xfs_root_table); - if (!xfs_table_header) - return -ENOMEM; - return 0; -} - -void -xfs_sysctl_unregister(void) -{ - unregister_sysctl_table(xfs_table_header); -} diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/linux-2.6/xfs_sysctl.h deleted file mode 100644 index b9937d4..0000000 --- a/fs/xfs/linux-2.6/xfs_sysctl.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2001-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SYSCTL_H__ -#define __XFS_SYSCTL_H__ - -#include <linux/sysctl.h> - -/* - * Tunable xfs parameters - */ - -typedef struct xfs_sysctl_val { - int min; - int val; - int max; -} xfs_sysctl_val_t; - -typedef struct xfs_param { - xfs_sysctl_val_t sgid_inherit; /* Inherit S_ISGID if process' GID is - * not a member of parent dir GID. */ - xfs_sysctl_val_t symlink_mode; /* Link creat mode affected by umask */ - xfs_sysctl_val_t panic_mask; /* bitmask to cause panic on errors. */ - xfs_sysctl_val_t error_level; /* Degree of reporting for problems */ - xfs_sysctl_val_t syncd_timer; /* Interval between xfssyncd wakeups */ - xfs_sysctl_val_t stats_clear; /* Reset all XFS statistics to zero. */ - xfs_sysctl_val_t inherit_sync; /* Inherit the "sync" inode flag. */ - xfs_sysctl_val_t inherit_nodump;/* Inherit the "nodump" inode flag. */ - xfs_sysctl_val_t inherit_noatim;/* Inherit the "noatime" inode flag. */ - xfs_sysctl_val_t xfs_buf_timer; /* Interval between xfsbufd wakeups. */ - xfs_sysctl_val_t xfs_buf_age; /* Metadata buffer age before flush. */ - xfs_sysctl_val_t inherit_nosym; /* Inherit the "nosymlinks" flag. 
*/ - xfs_sysctl_val_t rotorstep; /* inode32 AG rotoring control knob */ - xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */ - xfs_sysctl_val_t fstrm_timer; /* Filestream dir-AG assoc'n timeout. */ -} xfs_param_t; - -/* - * xfs_error_level: - * - * How much error reporting will be done when internal problems are - * encountered. These problems normally return an EFSCORRUPTED to their - * caller, with no other information reported. - * - * 0 No error reports - * 1 Report EFSCORRUPTED errors that will cause a filesystem shutdown - * 5 Report all EFSCORRUPTED errors (all of the above errors, plus any - * additional errors that are known to not cause shutdowns) - * - * xfs_panic_mask bit 0x8 turns the error reports into panics - */ - -enum { - /* XFS_REFCACHE_SIZE = 1 */ - /* XFS_REFCACHE_PURGE = 2 */ - /* XFS_RESTRICT_CHOWN = 3 */ - XFS_SGID_INHERIT = 4, - XFS_SYMLINK_MODE = 5, - XFS_PANIC_MASK = 6, - XFS_ERRLEVEL = 7, - XFS_SYNCD_TIMER = 8, - /* XFS_PROBE_DMAPI = 9 */ - /* XFS_PROBE_IOOPS = 10 */ - /* XFS_PROBE_QUOTA = 11 */ - XFS_STATS_CLEAR = 12, - XFS_INHERIT_SYNC = 13, - XFS_INHERIT_NODUMP = 14, - XFS_INHERIT_NOATIME = 15, - XFS_BUF_TIMER = 16, - XFS_BUF_AGE = 17, - /* XFS_IO_BYPASS = 18 */ - XFS_INHERIT_NOSYM = 19, - XFS_ROTORSTEP = 20, - XFS_INHERIT_NODFRG = 21, - XFS_FILESTREAM_TIMER = 22, -}; - -extern xfs_param_t xfs_params; - -#ifdef CONFIG_SYSCTL -extern int xfs_sysctl_register(void); -extern void xfs_sysctl_unregister(void); -#else -# define xfs_sysctl_register() (0) -# define xfs_sysctl_unregister() do { } while (0) -#endif /* CONFIG_SYSCTL */ - -#endif /* __XFS_SYSCTL_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_trace.c b/fs/xfs/linux-2.6/xfs_trace.c deleted file mode 100644 index 9010ce8..0000000 --- a/fs/xfs/linux-2.6/xfs_trace.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2009, Christoph Hellwig - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_types.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_da_btree.h" -#include "xfs_bmap_btree.h" -#include "xfs_alloc_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_btree.h" -#include "xfs_mount.h" -#include "xfs_ialloc.h" -#include "xfs_itable.h" -#include "xfs_alloc.h" -#include "xfs_bmap.h" -#include "xfs_attr.h" -#include "xfs_attr_leaf.h" -#include "xfs_log_priv.h" -#include "xfs_buf_item.h" -#include "xfs_quota.h" -#include "xfs_iomap.h" -#include "xfs_aops.h" -#include "xfs_dquot_item.h" -#include "xfs_dquot.h" -#include "xfs_log_recover.h" -#include "xfs_inode_item.h" - -/* - * We include this last to have the helpers above available for the trace - * event implementations. 
- */ -#define CREATE_TRACE_POINTS -#include "xfs_trace.h" diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h deleted file mode 100644 index 690fc7a..0000000 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ /dev/null @@ -1,1746 +0,0 @@ -/* - * Copyright (c) 2009, Christoph Hellwig - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM xfs - -#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_XFS_H - -#include <linux/tracepoint.h> - -struct xfs_agf; -struct xfs_alloc_arg; -struct xfs_attr_list_context; -struct xfs_buf_log_item; -struct xfs_da_args; -struct xfs_da_node_entry; -struct xfs_dquot; -struct xlog_ticket; -struct log; -struct xlog_recover; -struct xlog_recover_item; -struct xfs_buf_log_format; -struct xfs_inode_log_format; - -DECLARE_EVENT_CLASS(xfs_attr_list_class, - TP_PROTO(struct xfs_attr_list_context *ctx), - TP_ARGS(ctx), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(u32, hashval) - __field(u32, blkno) - __field(u32, offset) - __field(void *, alist) - __field(int, bufsize) - __field(int, count) - __field(int, firstu) - __field(int, dupcnt) - __field(int, flags) - ), - TP_fast_assign( - __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; - __entry->ino = ctx->dp->i_ino; - __entry->hashval = ctx->cursor->hashval; - __entry->blkno = ctx->cursor->blkno; - __entry->offset = ctx->cursor->offset; - __entry->alist = ctx->alist; - __entry->bufsize = ctx->bufsize; - __entry->count = ctx->count; - __entry->firstu = ctx->firstu; - __entry->flags = ctx->flags; - ), - TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " - "alist 0x%p size %u count %u firstu %u flags %d %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->hashval, - __entry->blkno, - __entry->offset, - __entry->dupcnt, - __entry->alist, - __entry->bufsize, - __entry->count, - __entry->firstu, - __entry->flags, - __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) - ) -) - -#define DEFINE_ATTR_LIST_EVENT(name) \ -DEFINE_EVENT(xfs_attr_list_class, name, \ - TP_PROTO(struct xfs_attr_list_context *ctx), \ - TP_ARGS(ctx)) -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk); -DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound); - -DECLARE_EVENT_CLASS(xfs_perag_class, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, - unsigned long caller_ip), - TP_ARGS(mp, agno, refcount, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(int, refcount) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - 
__entry->refcount = refcount; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d agno %u refcount %d caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->refcount, - (char *)__entry->caller_ip) -); - -#define DEFINE_PERAG_REF_EVENT(name) \ -DEFINE_EVENT(xfs_perag_class, name, \ - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \ - unsigned long caller_ip), \ - TP_ARGS(mp, agno, refcount, caller_ip)) -DEFINE_PERAG_REF_EVENT(xfs_perag_get); -DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag); -DEFINE_PERAG_REF_EVENT(xfs_perag_put); -DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); -DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); - -TRACE_EVENT(xfs_attr_list_node_descend, - TP_PROTO(struct xfs_attr_list_context *ctx, - struct xfs_da_node_entry *btree), - TP_ARGS(ctx, btree), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(u32, hashval) - __field(u32, blkno) - __field(u32, offset) - __field(void *, alist) - __field(int, bufsize) - __field(int, count) - __field(int, firstu) - __field(int, dupcnt) - __field(int, flags) - __field(u32, bt_hashval) - __field(u32, bt_before) - ), - TP_fast_assign( - __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; - __entry->ino = ctx->dp->i_ino; - __entry->hashval = ctx->cursor->hashval; - __entry->blkno = ctx->cursor->blkno; - __entry->offset = ctx->cursor->offset; - __entry->alist = ctx->alist; - __entry->bufsize = ctx->bufsize; - __entry->count = ctx->count; - __entry->firstu = ctx->firstu; - __entry->flags = ctx->flags; - __entry->bt_hashval = be32_to_cpu(btree->hashval); - __entry->bt_before = be32_to_cpu(btree->before); - ), - TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " - "alist 0x%p size %u count %u firstu %u flags %d %s " - "node hashval %u, node before %u", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->hashval, - __entry->blkno, - __entry->offset, - __entry->dupcnt, - __entry->alist, - __entry->bufsize, - __entry->count, - __entry->firstu, - __entry->flags, - __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS), - __entry->bt_hashval, - __entry->bt_before) -); - -TRACE_EVENT(xfs_iext_insert, - TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, - struct xfs_bmbt_irec *r, int state, unsigned long caller_ip), - TP_ARGS(ip, idx, r, state, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_extnum_t, idx) - __field(xfs_fileoff_t, startoff) - __field(xfs_fsblock_t, startblock) - __field(xfs_filblks_t, blockcount) - __field(xfs_exntst_t, state) - __field(int, bmap_state) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->idx = idx; - __entry->startoff = r->br_startoff; - __entry->startblock = r->br_startblock; - __entry->blockcount = r->br_blockcount; - __entry->state = r->br_state; - __entry->bmap_state = state; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " - "offset %lld block %lld count %lld flag %d caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), - (long)__entry->idx, - __entry->startoff, - (__int64_t)__entry->startblock, - __entry->blockcount, - __entry->state, - (char *)__entry->caller_ip) -); - -DECLARE_EVENT_CLASS(xfs_bmap_class, - TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, - unsigned long caller_ip), - TP_ARGS(ip, idx, state, caller_ip), - TP_STRUCT__entry( - 
__field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_extnum_t, idx) - __field(xfs_fileoff_t, startoff) - __field(xfs_fsblock_t, startblock) - __field(xfs_filblks_t, blockcount) - __field(xfs_exntst_t, state) - __field(int, bmap_state) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? - ip->i_afp : &ip->i_df; - struct xfs_bmbt_irec r; - - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->idx = idx; - __entry->startoff = r.br_startoff; - __entry->startblock = r.br_startblock; - __entry->blockcount = r.br_blockcount; - __entry->state = r.br_state; - __entry->bmap_state = state; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " - "offset %lld block %lld count %lld flag %d caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), - (long)__entry->idx, - __entry->startoff, - (__int64_t)__entry->startblock, - __entry->blockcount, - __entry->state, - (char *)__entry->caller_ip) -) - -#define DEFINE_BMAP_EVENT(name) \ -DEFINE_EVENT(xfs_bmap_class, name, \ - TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \ - unsigned long caller_ip), \ - TP_ARGS(ip, idx, state, caller_ip)) -DEFINE_BMAP_EVENT(xfs_iext_remove); -DEFINE_BMAP_EVENT(xfs_bmap_pre_update); -DEFINE_BMAP_EVENT(xfs_bmap_post_update); -DEFINE_BMAP_EVENT(xfs_extlist); - -DECLARE_EVENT_CLASS(xfs_buf_class, - TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), - TP_ARGS(bp, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_daddr_t, bno) - __field(size_t, buffer_length) - __field(int, hold) - __field(int, pincount) - __field(unsigned, lockval) - __field(unsigned, flags) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = bp->b_target->bt_dev; - __entry->bno = bp->b_bn; - __entry->buffer_length = bp->b_buffer_length; - __entry->hold = atomic_read(&bp->b_hold); - __entry->pincount = atomic_read(&bp->b_pin_count); - __entry->lockval = bp->b_sema.count; - __entry->flags = bp->b_flags; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " - "lock %d flags %s caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long long)__entry->bno, - __entry->buffer_length, - __entry->hold, - __entry->pincount, - __entry->lockval, - __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), - (void *)__entry->caller_ip) -) - -#define DEFINE_BUF_EVENT(name) \ -DEFINE_EVENT(xfs_buf_class, name, \ - TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ - TP_ARGS(bp, caller_ip)) -DEFINE_BUF_EVENT(xfs_buf_init); -DEFINE_BUF_EVENT(xfs_buf_free); -DEFINE_BUF_EVENT(xfs_buf_hold); -DEFINE_BUF_EVENT(xfs_buf_rele); -DEFINE_BUF_EVENT(xfs_buf_iodone); -DEFINE_BUF_EVENT(xfs_buf_iorequest); -DEFINE_BUF_EVENT(xfs_buf_bawrite); -DEFINE_BUF_EVENT(xfs_buf_bdwrite); -DEFINE_BUF_EVENT(xfs_buf_lock); -DEFINE_BUF_EVENT(xfs_buf_lock_done); -DEFINE_BUF_EVENT(xfs_buf_trylock); -DEFINE_BUF_EVENT(xfs_buf_unlock); -DEFINE_BUF_EVENT(xfs_buf_iowait); -DEFINE_BUF_EVENT(xfs_buf_iowait_done); -DEFINE_BUF_EVENT(xfs_buf_delwri_queue); -DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue); -DEFINE_BUF_EVENT(xfs_buf_delwri_split); -DEFINE_BUF_EVENT(xfs_buf_get_uncached); -DEFINE_BUF_EVENT(xfs_bdstrat_shut); -DEFINE_BUF_EVENT(xfs_buf_item_relse); -DEFINE_BUF_EVENT(xfs_buf_item_iodone); -DEFINE_BUF_EVENT(xfs_buf_item_iodone_async); 
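/*
 * Illustrative sketch, not part of the patch: each DEFINE_BUF_EVENT(name)
 * above expands to DEFINE_EVENT(xfs_buf_class, name, ...), which generates
 * a callable tracepoint, trace_<name>(), with the class's TP_PROTO
 * signature. Assuming a held struct xfs_buf *bp, a call site looks like:
 *
 *	trace_xfs_buf_hold(bp, _RET_IP_);
 *
 * _RET_IP_ supplies the caller's return address, which the TP_printk
 * format above renders as a function name via %pf.
 */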
-DEFINE_BUF_EVENT(xfs_buf_error_relse); -DEFINE_BUF_EVENT(xfs_trans_read_buf_io); -DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); - -/* not really buffer traces, but the buf provides useful information */ -DEFINE_BUF_EVENT(xfs_btree_corrupt); -DEFINE_BUF_EVENT(xfs_da_btree_corrupt); -DEFINE_BUF_EVENT(xfs_reset_dqcounts); -DEFINE_BUF_EVENT(xfs_inode_item_push); - -/* pass flags explicitly */ -DECLARE_EVENT_CLASS(xfs_buf_flags_class, - TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), - TP_ARGS(bp, flags, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_daddr_t, bno) - __field(size_t, buffer_length) - __field(int, hold) - __field(int, pincount) - __field(unsigned, lockval) - __field(unsigned, flags) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = bp->b_target->bt_dev; - __entry->bno = bp->b_bn; - __entry->buffer_length = bp->b_buffer_length; - __entry->flags = flags; - __entry->hold = atomic_read(&bp->b_hold); - __entry->pincount = atomic_read(&bp->b_pin_count); - __entry->lockval = bp->b_sema.count; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " - "lock %d flags %s caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long long)__entry->bno, - __entry->buffer_length, - __entry->hold, - __entry->pincount, - __entry->lockval, - __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), - (void *)__entry->caller_ip) -) - -#define DEFINE_BUF_FLAGS_EVENT(name) \ -DEFINE_EVENT(xfs_buf_flags_class, name, \ - TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ - TP_ARGS(bp, flags, caller_ip)) -DEFINE_BUF_FLAGS_EVENT(xfs_buf_find); -DEFINE_BUF_FLAGS_EVENT(xfs_buf_get); -DEFINE_BUF_FLAGS_EVENT(xfs_buf_read); - -TRACE_EVENT(xfs_buf_ioerror, - TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip), - TP_ARGS(bp, error, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_daddr_t, bno) - __field(size_t, buffer_length) - __field(unsigned, flags) - __field(int, hold) - __field(int, pincount) - __field(unsigned, lockval) - __field(int, error) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = bp->b_target->bt_dev; - __entry->bno = bp->b_bn; - __entry->buffer_length = bp->b_buffer_length; - __entry->hold = atomic_read(&bp->b_hold); - __entry->pincount = atomic_read(&bp->b_pin_count); - __entry->lockval = bp->b_sema.count; - __entry->error = error; - __entry->flags = bp->b_flags; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " - "lock %d error %d flags %s caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long long)__entry->bno, - __entry->buffer_length, - __entry->hold, - __entry->pincount, - __entry->lockval, - __entry->error, - __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), - (void *)__entry->caller_ip) -); - -DECLARE_EVENT_CLASS(xfs_buf_item_class, - TP_PROTO(struct xfs_buf_log_item *bip), - TP_ARGS(bip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_daddr_t, buf_bno) - __field(size_t, buf_len) - __field(int, buf_hold) - __field(int, buf_pincount) - __field(int, buf_lockval) - __field(unsigned, buf_flags) - __field(unsigned, bli_recur) - __field(int, bli_refcount) - __field(unsigned, bli_flags) - __field(void *, li_desc) - __field(unsigned, li_flags) - ), - TP_fast_assign( - __entry->dev = bip->bli_buf->b_target->bt_dev; - __entry->bli_flags = bip->bli_flags; - __entry->bli_recur = bip->bli_recur; - __entry->bli_refcount 
= atomic_read(&bip->bli_refcount); - __entry->buf_bno = bip->bli_buf->b_bn; - __entry->buf_len = bip->bli_buf->b_buffer_length; - __entry->buf_flags = bip->bli_buf->b_flags; - __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); - __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count); - __entry->buf_lockval = bip->bli_buf->b_sema.count; - __entry->li_desc = bip->bli_item.li_desc; - __entry->li_flags = bip->bli_item.li_flags; - ), - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " - "lock %d flags %s recur %d refcount %d bliflags %s " - "lidesc 0x%p liflags %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long long)__entry->buf_bno, - __entry->buf_len, - __entry->buf_hold, - __entry->buf_pincount, - __entry->buf_lockval, - __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), - __entry->bli_recur, - __entry->bli_refcount, - __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), - __entry->li_desc, - __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) -) - -#define DEFINE_BUF_ITEM_EVENT(name) \ -DEFINE_EVENT(xfs_buf_item_class, name, \ - TP_PROTO(struct xfs_buf_log_item *bip), \ - TP_ARGS(bip)) -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); -DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf); -DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); -DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur); -DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb); -DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur); -DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf); -DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur); -DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf); -DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse); -DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin); -DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold); -DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); -DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); - -DECLARE_EVENT_CLASS(xfs_lock_class, - TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, - unsigned long caller_ip), - TP_ARGS(ip, lock_flags, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(int, lock_flags) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->lock_flags = lock_flags; - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), - (void *)__entry->caller_ip) -) - -#define DEFINE_LOCK_EVENT(name) \ -DEFINE_EVENT(xfs_lock_class, name, \ - TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \ - unsigned long caller_ip), \ - TP_ARGS(ip, lock_flags, caller_ip)) -DEFINE_LOCK_EVENT(xfs_ilock); -DEFINE_LOCK_EVENT(xfs_ilock_nowait); -DEFINE_LOCK_EVENT(xfs_ilock_demote); -DEFINE_LOCK_EVENT(xfs_iunlock); - -DECLARE_EVENT_CLASS(xfs_inode_class, - TP_PROTO(struct xfs_inode *ip), - TP_ARGS(ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - ), - 
TP_printk("dev %d:%d ino 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino) -) - -#define DEFINE_INODE_EVENT(name) \ -DEFINE_EVENT(xfs_inode_class, name, \ - TP_PROTO(struct xfs_inode *ip), \ - TP_ARGS(ip)) -DEFINE_INODE_EVENT(xfs_iget_skip); -DEFINE_INODE_EVENT(xfs_iget_reclaim); -DEFINE_INODE_EVENT(xfs_iget_reclaim_fail); -DEFINE_INODE_EVENT(xfs_iget_hit); -DEFINE_INODE_EVENT(xfs_iget_miss); - -DEFINE_INODE_EVENT(xfs_getattr); -DEFINE_INODE_EVENT(xfs_setattr); -DEFINE_INODE_EVENT(xfs_readlink); -DEFINE_INODE_EVENT(xfs_alloc_file_space); -DEFINE_INODE_EVENT(xfs_free_file_space); -DEFINE_INODE_EVENT(xfs_readdir); -#ifdef CONFIG_XFS_POSIX_ACL -DEFINE_INODE_EVENT(xfs_get_acl); -#endif -DEFINE_INODE_EVENT(xfs_vm_bmap); -DEFINE_INODE_EVENT(xfs_file_ioctl); -DEFINE_INODE_EVENT(xfs_file_compat_ioctl); -DEFINE_INODE_EVENT(xfs_ioctl_setattr); -DEFINE_INODE_EVENT(xfs_file_fsync); -DEFINE_INODE_EVENT(xfs_destroy_inode); -DEFINE_INODE_EVENT(xfs_write_inode); -DEFINE_INODE_EVENT(xfs_evict_inode); - -DEFINE_INODE_EVENT(xfs_dquot_dqalloc); -DEFINE_INODE_EVENT(xfs_dquot_dqdetach); - -DECLARE_EVENT_CLASS(xfs_iref_class, - TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), - TP_ARGS(ip, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(int, count) - __field(int, pincount) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->count = atomic_read(&VFS_I(ip)->i_count); - __entry->pincount = atomic_read(&ip->i_pincount); - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->count, - __entry->pincount, - (char *)__entry->caller_ip) -) - -#define DEFINE_IREF_EVENT(name) \ -DEFINE_EVENT(xfs_iref_class, name, \ - TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ - TP_ARGS(ip, caller_ip)) -DEFINE_IREF_EVENT(xfs_ihold); -DEFINE_IREF_EVENT(xfs_irele); -DEFINE_IREF_EVENT(xfs_inode_pin); -DEFINE_IREF_EVENT(xfs_inode_unpin); -DEFINE_IREF_EVENT(xfs_inode_unpin_nowait); - -DECLARE_EVENT_CLASS(xfs_namespace_class, - TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), - TP_ARGS(dp, name), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, dp_ino) - __dynamic_array(char, name, name->len) - ), - TP_fast_assign( - __entry->dev = VFS_I(dp)->i_sb->s_dev; - __entry->dp_ino = dp->i_ino; - memcpy(__get_str(name), name->name, name->len); - ), - TP_printk("dev %d:%d dp ino 0x%llx name %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->dp_ino, - __get_str(name)) -) - -#define DEFINE_NAMESPACE_EVENT(name) \ -DEFINE_EVENT(xfs_namespace_class, name, \ - TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \ - TP_ARGS(dp, name)) -DEFINE_NAMESPACE_EVENT(xfs_remove); -DEFINE_NAMESPACE_EVENT(xfs_link); -DEFINE_NAMESPACE_EVENT(xfs_lookup); -DEFINE_NAMESPACE_EVENT(xfs_create); -DEFINE_NAMESPACE_EVENT(xfs_symlink); - -TRACE_EVENT(xfs_rename, - TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp, - struct xfs_name *src_name, struct xfs_name *target_name), - TP_ARGS(src_dp, target_dp, src_name, target_name), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, src_dp_ino) - __field(xfs_ino_t, target_dp_ino) - __dynamic_array(char, src_name, src_name->len) - __dynamic_array(char, target_name, target_name->len) - ), - TP_fast_assign( - __entry->dev = VFS_I(src_dp)->i_sb->s_dev; - __entry->src_dp_ino = src_dp->i_ino; - 
__entry->target_dp_ino = target_dp->i_ino; - memcpy(__get_str(src_name), src_name->name, src_name->len); - memcpy(__get_str(target_name), target_name->name, target_name->len); - ), - TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx" - " src name %s target name %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->src_dp_ino, - __entry->target_dp_ino, - __get_str(src_name), - __get_str(target_name)) -) - -DECLARE_EVENT_CLASS(xfs_dquot_class, - TP_PROTO(struct xfs_dquot *dqp), - TP_ARGS(dqp), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(u32, id) - __field(unsigned, flags) - __field(unsigned, nrefs) - __field(unsigned long long, res_bcount) - __field(unsigned long long, bcount) - __field(unsigned long long, icount) - __field(unsigned long long, blk_hardlimit) - __field(unsigned long long, blk_softlimit) - __field(unsigned long long, ino_hardlimit) - __field(unsigned long long, ino_softlimit) - ), \ - TP_fast_assign( - __entry->dev = dqp->q_mount->m_super->s_dev; - __entry->id = be32_to_cpu(dqp->q_core.d_id); - __entry->flags = dqp->dq_flags; - __entry->nrefs = dqp->q_nrefs; - __entry->res_bcount = dqp->q_res_bcount; - __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); - __entry->icount = be64_to_cpu(dqp->q_core.d_icount); - __entry->blk_hardlimit = - be64_to_cpu(dqp->q_core.d_blk_hardlimit); - __entry->blk_softlimit = - be64_to_cpu(dqp->q_core.d_blk_softlimit); - __entry->ino_hardlimit = - be64_to_cpu(dqp->q_core.d_ino_hardlimit); - __entry->ino_softlimit = - be64_to_cpu(dqp->q_core.d_ino_softlimit); - ), - TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " - "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx " - "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->id, - __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), - __entry->nrefs, - __entry->res_bcount, - __entry->bcount, - __entry->blk_hardlimit, - __entry->blk_softlimit, - __entry->icount, - __entry->ino_hardlimit, - __entry->ino_softlimit) -) - -#define DEFINE_DQUOT_EVENT(name) \ -DEFINE_EVENT(xfs_dquot_class, name, \ - TP_PROTO(struct xfs_dquot *dqp), \ - TP_ARGS(dqp)) -DEFINE_DQUOT_EVENT(xfs_dqadjust); -DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); -DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); -DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink); -DEFINE_DQUOT_EVENT(xfs_dqattach_found); -DEFINE_DQUOT_EVENT(xfs_dqattach_get); -DEFINE_DQUOT_EVENT(xfs_dqinit); -DEFINE_DQUOT_EVENT(xfs_dqreuse); -DEFINE_DQUOT_EVENT(xfs_dqalloc); -DEFINE_DQUOT_EVENT(xfs_dqtobp_read); -DEFINE_DQUOT_EVENT(xfs_dqread); -DEFINE_DQUOT_EVENT(xfs_dqread_fail); -DEFINE_DQUOT_EVENT(xfs_dqlookup_found); -DEFINE_DQUOT_EVENT(xfs_dqlookup_want); -DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist); -DEFINE_DQUOT_EVENT(xfs_dqlookup_done); -DEFINE_DQUOT_EVENT(xfs_dqget_hit); -DEFINE_DQUOT_EVENT(xfs_dqget_miss); -DEFINE_DQUOT_EVENT(xfs_dqput); -DEFINE_DQUOT_EVENT(xfs_dqput_wait); -DEFINE_DQUOT_EVENT(xfs_dqput_free); -DEFINE_DQUOT_EVENT(xfs_dqrele); -DEFINE_DQUOT_EVENT(xfs_dqflush); -DEFINE_DQUOT_EVENT(xfs_dqflush_force); -DEFINE_DQUOT_EVENT(xfs_dqflush_done); - -DECLARE_EVENT_CLASS(xfs_loggrant_class, - TP_PROTO(struct log *log, struct xlog_ticket *tic), - TP_ARGS(log, tic), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(unsigned, trans_type) - __field(char, ocnt) - __field(char, cnt) - __field(int, curr_res) - __field(int, unit_res) - __field(unsigned int, flags) - __field(int, reserveq) - __field(int, writeq) - __field(int, grant_reserve_cycle) - __field(int, grant_reserve_bytes) - __field(int, 
grant_write_cycle) - __field(int, grant_write_bytes) - __field(int, curr_cycle) - __field(int, curr_block) - __field(xfs_lsn_t, tail_lsn) - ), - TP_fast_assign( - __entry->dev = log->l_mp->m_super->s_dev; - __entry->trans_type = tic->t_trans_type; - __entry->ocnt = tic->t_ocnt; - __entry->cnt = tic->t_cnt; - __entry->curr_res = tic->t_curr_res; - __entry->unit_res = tic->t_unit_res; - __entry->flags = tic->t_flags; - __entry->reserveq = list_empty(&log->l_reserveq); - __entry->writeq = list_empty(&log->l_writeq); - xlog_crack_grant_head(&log->l_grant_reserve_head, - &__entry->grant_reserve_cycle, - &__entry->grant_reserve_bytes); - xlog_crack_grant_head(&log->l_grant_write_head, - &__entry->grant_write_cycle, - &__entry->grant_write_bytes); - __entry->curr_cycle = log->l_curr_cycle; - __entry->curr_block = log->l_curr_block; - __entry->tail_lsn = atomic64_read(&log->l_tail_lsn); - ), - TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " - "t_unit_res %u t_flags %s reserveq %s " - "writeq %s grant_reserve_cycle %d " - "grant_reserve_bytes %d grant_write_cycle %d " - "grant_write_bytes %d curr_cycle %d curr_block %d " - "tail_cycle %d tail_block %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), - __entry->ocnt, - __entry->cnt, - __entry->curr_res, - __entry->unit_res, - __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), - __entry->reserveq ? "empty" : "active", - __entry->writeq ? "empty" : "active", - __entry->grant_reserve_cycle, - __entry->grant_reserve_bytes, - __entry->grant_write_cycle, - __entry->grant_write_bytes, - __entry->curr_cycle, - __entry->curr_block, - CYCLE_LSN(__entry->tail_lsn), - BLOCK_LSN(__entry->tail_lsn) - ) -) - -#define DEFINE_LOGGRANT_EVENT(name) \ -DEFINE_EVENT(xfs_loggrant_class, name, \ - TP_PROTO(struct log *log, struct xlog_ticket *tic), \ - TP_ARGS(log, tic)) -DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); -DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); -DEFINE_LOGGRANT_EVENT(xfs_log_reserve); -DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); -DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); -DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); -DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter); -DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit); -DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub); - -DECLARE_EVENT_CLASS(xfs_file_class, - TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), - TP_ARGS(ip, count, offset, flags), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fsize_t, size) - __field(xfs_fsize_t, new_size) - __field(loff_t, offset) - __field(size_t, count) - __field(int, flags) - ), - TP_fast_assign( - __entry->dev = 
VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; - __entry->new_size = ip->i_new_size; - __entry->offset = offset; - __entry->count = count; - __entry->flags = flags; - ), - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " - "offset 0x%llx count 0x%zx ioflags %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - __entry->new_size, - __entry->offset, - __entry->count, - __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) -) - -#define DEFINE_RW_EVENT(name) \ -DEFINE_EVENT(xfs_file_class, name, \ - TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \ - TP_ARGS(ip, count, offset, flags)) -DEFINE_RW_EVENT(xfs_file_read); -DEFINE_RW_EVENT(xfs_file_buffered_write); -DEFINE_RW_EVENT(xfs_file_direct_write); -DEFINE_RW_EVENT(xfs_file_splice_read); -DEFINE_RW_EVENT(xfs_file_splice_write); - -DECLARE_EVENT_CLASS(xfs_page_class, - TP_PROTO(struct inode *inode, struct page *page, unsigned long off), - TP_ARGS(inode, page, off), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(pgoff_t, pgoff) - __field(loff_t, size) - __field(unsigned long, offset) - __field(int, delalloc) - __field(int, unwritten) - ), - TP_fast_assign( - int delalloc = -1, unwritten = -1; - - if (page_has_buffers(page)) - xfs_count_page_state(page, &delalloc, &unwritten); - __entry->dev = inode->i_sb->s_dev; - __entry->ino = XFS_I(inode)->i_ino; - __entry->pgoff = page_offset(page); - __entry->size = i_size_read(inode); - __entry->offset = off; - __entry->delalloc = delalloc; - __entry->unwritten = unwritten; - ), - TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " - "delalloc %d unwritten %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->pgoff, - __entry->size, - __entry->offset, - __entry->delalloc, - __entry->unwritten) -) - -#define DEFINE_PAGE_EVENT(name) \ -DEFINE_EVENT(xfs_page_class, name, \ - TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ - TP_ARGS(inode, page, off)) -DEFINE_PAGE_EVENT(xfs_writepage); -DEFINE_PAGE_EVENT(xfs_releasepage); -DEFINE_PAGE_EVENT(xfs_invalidatepage); - -DECLARE_EVENT_CLASS(xfs_imap_class, - TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, - int type, struct xfs_bmbt_irec *irec), - TP_ARGS(ip, offset, count, type, irec), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(loff_t, size) - __field(loff_t, new_size) - __field(loff_t, offset) - __field(size_t, count) - __field(int, type) - __field(xfs_fileoff_t, startoff) - __field(xfs_fsblock_t, startblock) - __field(xfs_filblks_t, blockcount) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; - __entry->new_size = ip->i_new_size; - __entry->offset = offset; - __entry->count = count; - __entry->type = type; - __entry->startoff = irec ? irec->br_startoff : 0; - __entry->startblock = irec ? irec->br_startblock : 0; - __entry->blockcount = irec ? 
irec->br_blockcount : 0; - ), - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " - "offset 0x%llx count %zd type %s " - "startoff 0x%llx startblock %lld blockcount 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - __entry->new_size, - __entry->offset, - __entry->count, - __print_symbolic(__entry->type, XFS_IO_TYPES), - __entry->startoff, - (__int64_t)__entry->startblock, - __entry->blockcount) -) - -#define DEFINE_IOMAP_EVENT(name) \ -DEFINE_EVENT(xfs_imap_class, name, \ - TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ - int type, struct xfs_bmbt_irec *irec), \ - TP_ARGS(ip, offset, count, type, irec)) -DEFINE_IOMAP_EVENT(xfs_map_blocks_found); -DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc); -DEFINE_IOMAP_EVENT(xfs_get_blocks_found); -DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc); - -DECLARE_EVENT_CLASS(xfs_simple_io_class, - TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), - TP_ARGS(ip, offset, count), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(loff_t, isize) - __field(loff_t, disize) - __field(loff_t, new_size) - __field(loff_t, offset) - __field(size_t, count) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->isize = ip->i_size; - __entry->disize = ip->i_d.di_size; - __entry->new_size = ip->i_new_size; - __entry->offset = offset; - __entry->count = count; - ), - TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx " - "offset 0x%llx count %zd", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->isize, - __entry->disize, - __entry->new_size, - __entry->offset, - __entry->count) -); - -#define DEFINE_SIMPLE_IO_EVENT(name) \ -DEFINE_EVENT(xfs_simple_io_class, name, \ - TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \ - TP_ARGS(ip, offset, count)) -DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); -DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); -DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound); -DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize); - -DECLARE_EVENT_CLASS(xfs_itrunc_class, - TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), - TP_ARGS(ip, new_size), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fsize_t, size) - __field(xfs_fsize_t, new_size) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; - __entry->new_size = new_size; - ), - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - __entry->new_size) -) - -#define DEFINE_ITRUNC_EVENT(name) \ -DEFINE_EVENT(xfs_itrunc_class, name, \ - TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ - TP_ARGS(ip, new_size)) -DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start); -DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end); - -TRACE_EVENT(xfs_pagecache_inval, - TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), - TP_ARGS(ip, start, finish), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fsize_t, size) - __field(xfs_off_t, start) - __field(xfs_off_t, finish) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; - __entry->start = start; - __entry->finish = finish; - ), - TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - 
__entry->start, - __entry->finish) -); - -TRACE_EVENT(xfs_bunmap, - TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, - int flags, unsigned long caller_ip), - TP_ARGS(ip, bno, len, flags, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fsize_t, size) - __field(xfs_fileoff_t, bno) - __field(xfs_filblks_t, len) - __field(unsigned long, caller_ip) - __field(int, flags) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; - __entry->bno = bno; - __entry->len = len; - __entry->caller_ip = caller_ip; - __entry->flags = flags; - ), - TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx" - "flags %s caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - __entry->bno, - __entry->len, - __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS), - (void *)__entry->caller_ip) - -); - -DECLARE_EVENT_CLASS(xfs_busy_class, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t agbno, xfs_extlen_t len), - TP_ARGS(mp, agno, agbno, len), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(xfs_extlen_t, len) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - __entry->agbno = agbno; - __entry->len = len; - ), - TP_printk("dev %d:%d agno %u agbno %u len %u", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __entry->len) -); -#define DEFINE_BUSY_EVENT(name) \ -DEFINE_EVENT(xfs_busy_class, name, \ - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ - xfs_agblock_t agbno, xfs_extlen_t len), \ - TP_ARGS(mp, agno, agbno, len)) -DEFINE_BUSY_EVENT(xfs_alloc_busy); -DEFINE_BUSY_EVENT(xfs_alloc_busy_enomem); -DEFINE_BUSY_EVENT(xfs_alloc_busy_force); -DEFINE_BUSY_EVENT(xfs_alloc_busy_reuse); -DEFINE_BUSY_EVENT(xfs_alloc_busy_clear); - -TRACE_EVENT(xfs_alloc_busy_trim, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t agbno, xfs_extlen_t len, - xfs_agblock_t tbno, xfs_extlen_t tlen), - TP_ARGS(mp, agno, agbno, len, tbno, tlen), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(xfs_extlen_t, len) - __field(xfs_agblock_t, tbno) - __field(xfs_extlen_t, tlen) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - __entry->agbno = agbno; - __entry->len = len; - __entry->tbno = tbno; - __entry->tlen = tlen; - ), - TP_printk("dev %d:%d agno %u agbno %u len %u tbno %u tlen %u", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __entry->len, - __entry->tbno, - __entry->tlen) -); - -TRACE_EVENT(xfs_trans_commit_lsn, - TP_PROTO(struct xfs_trans *trans), - TP_ARGS(trans), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(struct xfs_trans *, tp) - __field(xfs_lsn_t, lsn) - ), - TP_fast_assign( - __entry->dev = trans->t_mountp->m_super->s_dev; - __entry->tp = trans; - __entry->lsn = trans->t_commit_lsn; - ), - TP_printk("dev %d:%d trans 0x%p commit_lsn 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->tp, - __entry->lsn) -); - -TRACE_EVENT(xfs_agf, - TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, - unsigned long caller_ip), - TP_ARGS(mp, agf, flags, caller_ip), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(int, flags) - __field(__u32, length) - __field(__u32, bno_root) - __field(__u32, 
cnt_root) - __field(__u32, bno_level) - __field(__u32, cnt_level) - __field(__u32, flfirst) - __field(__u32, fllast) - __field(__u32, flcount) - __field(__u32, freeblks) - __field(__u32, longest) - __field(unsigned long, caller_ip) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = be32_to_cpu(agf->agf_seqno), - __entry->flags = flags; - __entry->length = be32_to_cpu(agf->agf_length), - __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]), - __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]), - __entry->bno_level = - be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]), - __entry->cnt_level = - be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]), - __entry->flfirst = be32_to_cpu(agf->agf_flfirst), - __entry->fllast = be32_to_cpu(agf->agf_fllast), - __entry->flcount = be32_to_cpu(agf->agf_flcount), - __entry->freeblks = be32_to_cpu(agf->agf_freeblks), - __entry->longest = be32_to_cpu(agf->agf_longest); - __entry->caller_ip = caller_ip; - ), - TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u " - "levels b %u c %u flfirst %u fllast %u flcount %u " - "freeblks %u longest %u caller %pf", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __print_flags(__entry->flags, "|", XFS_AGF_FLAGS), - __entry->length, - __entry->bno_root, - __entry->cnt_root, - __entry->bno_level, - __entry->cnt_level, - __entry->flfirst, - __entry->fllast, - __entry->flcount, - __entry->freeblks, - __entry->longest, - (void *)__entry->caller_ip) -); - -TRACE_EVENT(xfs_free_extent, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, - xfs_extlen_t len, bool isfl, int haveleft, int haveright), - TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(xfs_extlen_t, len) - __field(int, isfl) - __field(int, haveleft) - __field(int, haveright) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - __entry->agbno = agbno; - __entry->len = len; - __entry->isfl = isfl; - __entry->haveleft = haveleft; - __entry->haveright = haveright; - ), - TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __entry->len, - __entry->isfl, - __entry->haveleft ? - (__entry->haveright ? "both" : "left") : - (__entry->haveright ? 
"right" : "none")) - -); - -DECLARE_EVENT_CLASS(xfs_alloc_class, - TP_PROTO(struct xfs_alloc_arg *args), - TP_ARGS(args), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(xfs_extlen_t, minlen) - __field(xfs_extlen_t, maxlen) - __field(xfs_extlen_t, mod) - __field(xfs_extlen_t, prod) - __field(xfs_extlen_t, minleft) - __field(xfs_extlen_t, total) - __field(xfs_extlen_t, alignment) - __field(xfs_extlen_t, minalignslop) - __field(xfs_extlen_t, len) - __field(short, type) - __field(short, otype) - __field(char, wasdel) - __field(char, wasfromfl) - __field(char, isfl) - __field(char, userdata) - __field(xfs_fsblock_t, firstblock) - ), - TP_fast_assign( - __entry->dev = args->mp->m_super->s_dev; - __entry->agno = args->agno; - __entry->agbno = args->agbno; - __entry->minlen = args->minlen; - __entry->maxlen = args->maxlen; - __entry->mod = args->mod; - __entry->prod = args->prod; - __entry->minleft = args->minleft; - __entry->total = args->total; - __entry->alignment = args->alignment; - __entry->minalignslop = args->minalignslop; - __entry->len = args->len; - __entry->type = args->type; - __entry->otype = args->otype; - __entry->wasdel = args->wasdel; - __entry->wasfromfl = args->wasfromfl; - __entry->isfl = args->isfl; - __entry->userdata = args->userdata; - __entry->firstblock = args->firstblock; - ), - TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " - "prod %u minleft %u total %u alignment %u minalignslop %u " - "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " - "userdata %d firstblock 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __entry->minlen, - __entry->maxlen, - __entry->mod, - __entry->prod, - __entry->minleft, - __entry->total, - __entry->alignment, - __entry->minalignslop, - __entry->len, - __print_symbolic(__entry->type, XFS_ALLOC_TYPES), - __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), - __entry->wasdel, - __entry->wasfromfl, - __entry->isfl, - __entry->userdata, - (unsigned long long)__entry->firstblock) -) - -#define DEFINE_ALLOC_EVENT(name) \ -DEFINE_EVENT(xfs_alloc_class, name, \ - TP_PROTO(struct xfs_alloc_arg *args), \ - TP_ARGS(args)) -DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); -DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound); -DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); -DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); -DEFINE_ALLOC_EVENT(xfs_alloc_near_first); -DEFINE_ALLOC_EVENT(xfs_alloc_near_greater); -DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser); -DEFINE_ALLOC_EVENT(xfs_alloc_near_error); -DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry); -DEFINE_ALLOC_EVENT(xfs_alloc_near_busy); -DEFINE_ALLOC_EVENT(xfs_alloc_size_neither); -DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry); -DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft); -DEFINE_ALLOC_EVENT(xfs_alloc_size_done); -DEFINE_ALLOC_EVENT(xfs_alloc_size_error); -DEFINE_ALLOC_EVENT(xfs_alloc_size_busy); -DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist); -DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough); -DEFINE_ALLOC_EVENT(xfs_alloc_small_done); -DEFINE_ALLOC_EVENT(xfs_alloc_small_error); -DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs); -DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix); -DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp); -DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed); -DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed); - -DECLARE_EVENT_CLASS(xfs_dir2_class, - TP_PROTO(struct xfs_da_args *args), - TP_ARGS(args), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __dynamic_array(char, name, 
args->namelen) - __field(int, namelen) - __field(xfs_dahash_t, hashval) - __field(xfs_ino_t, inumber) - __field(int, op_flags) - ), - TP_fast_assign( - __entry->dev = VFS_I(args->dp)->i_sb->s_dev; - __entry->ino = args->dp->i_ino; - if (args->namelen) - memcpy(__get_str(name), args->name, args->namelen); - __entry->namelen = args->namelen; - __entry->hashval = args->hashval; - __entry->inumber = args->inumber; - __entry->op_flags = args->op_flags; - ), - TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " - "inumber 0x%llx op_flags %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->namelen, - __entry->namelen ? __get_str(name) : NULL, - __entry->namelen, - __entry->hashval, - __entry->inumber, - __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) -) - -#define DEFINE_DIR2_EVENT(name) \ -DEFINE_EVENT(xfs_dir2_class, name, \ - TP_PROTO(struct xfs_da_args *args), \ - TP_ARGS(args)) -DEFINE_DIR2_EVENT(xfs_dir2_sf_addname); -DEFINE_DIR2_EVENT(xfs_dir2_sf_create); -DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup); -DEFINE_DIR2_EVENT(xfs_dir2_sf_replace); -DEFINE_DIR2_EVENT(xfs_dir2_sf_removename); -DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4); -DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8); -DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block); -DEFINE_DIR2_EVENT(xfs_dir2_block_addname); -DEFINE_DIR2_EVENT(xfs_dir2_block_lookup); -DEFINE_DIR2_EVENT(xfs_dir2_block_replace); -DEFINE_DIR2_EVENT(xfs_dir2_block_removename); -DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf); -DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf); -DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname); -DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup); -DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace); -DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename); -DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block); -DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node); -DEFINE_DIR2_EVENT(xfs_dir2_node_addname); -DEFINE_DIR2_EVENT(xfs_dir2_node_lookup); -DEFINE_DIR2_EVENT(xfs_dir2_node_replace); -DEFINE_DIR2_EVENT(xfs_dir2_node_removename); -DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf); - -DECLARE_EVENT_CLASS(xfs_dir2_space_class, - TP_PROTO(struct xfs_da_args *args, int idx), - TP_ARGS(args, idx), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(int, op_flags) - __field(int, idx) - ), - TP_fast_assign( - __entry->dev = VFS_I(args->dp)->i_sb->s_dev; - __entry->ino = args->dp->i_ino; - __entry->op_flags = args->op_flags; - __entry->idx = idx; - ), - TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), - __entry->idx) -) - -#define DEFINE_DIR2_SPACE_EVENT(name) \ -DEFINE_EVENT(xfs_dir2_space_class, name, \ - TP_PROTO(struct xfs_da_args *args, int idx), \ - TP_ARGS(args, idx)) -DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add); -DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove); -DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode); -DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode); - -TRACE_EVENT(xfs_dir2_leafn_moveents, - TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count), - TP_ARGS(args, src_idx, dst_idx, count), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(int, op_flags) - __field(int, src_idx) - __field(int, dst_idx) - __field(int, count) - ), - TP_fast_assign( - __entry->dev = VFS_I(args->dp)->i_sb->s_dev; - __entry->ino = args->dp->i_ino; - __entry->op_flags = args->op_flags; - __entry->src_idx = src_idx; - __entry->dst_idx = dst_idx; - __entry->count = count; - ), - TP_printk("dev %d:%d ino 0x%llx op_flags %s " - 
"src_idx %d dst_idx %d count %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), - __entry->src_idx, - __entry->dst_idx, - __entry->count) -); - -#define XFS_SWAPEXT_INODES \ - { 0, "target" }, \ - { 1, "temp" } - -#define XFS_INODE_FORMAT_STR \ - { 0, "invalid" }, \ - { 1, "local" }, \ - { 2, "extent" }, \ - { 3, "btree" } - -DECLARE_EVENT_CLASS(xfs_swap_extent_class, - TP_PROTO(struct xfs_inode *ip, int which), - TP_ARGS(ip, which), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(int, which) - __field(xfs_ino_t, ino) - __field(int, format) - __field(int, nex) - __field(int, max_nex) - __field(int, broot_size) - __field(int, fork_off) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->which = which; - __entry->ino = ip->i_ino; - __entry->format = ip->i_d.di_format; - __entry->nex = ip->i_d.di_nextents; - __entry->max_nex = ip->i_df.if_ext_max; - __entry->broot_size = ip->i_df.if_broot_bytes; - __entry->fork_off = XFS_IFORK_BOFF(ip); - ), - TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, " - "Max in-fork extents %d, broot size %d, fork offset %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_symbolic(__entry->which, XFS_SWAPEXT_INODES), - __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR), - __entry->nex, - __entry->max_nex, - __entry->broot_size, - __entry->fork_off) -) - -#define DEFINE_SWAPEXT_EVENT(name) \ -DEFINE_EVENT(xfs_swap_extent_class, name, \ - TP_PROTO(struct xfs_inode *ip, int which), \ - TP_ARGS(ip, which)) - -DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before); -DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); - -DECLARE_EVENT_CLASS(xfs_log_recover_item_class, - TP_PROTO(struct log *log, struct xlog_recover *trans, - struct xlog_recover_item *item, int pass), - TP_ARGS(log, trans, item, pass), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(unsigned long, item) - __field(xlog_tid_t, tid) - __field(int, type) - __field(int, pass) - __field(int, count) - __field(int, total) - ), - TP_fast_assign( - __entry->dev = log->l_mp->m_super->s_dev; - __entry->item = (unsigned long)item; - __entry->tid = trans->r_log_tid; - __entry->type = ITEM_TYPE(item); - __entry->pass = pass; - __entry->count = item->ri_cnt; - __entry->total = item->ri_total; - ), - TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s " - "item region count/total %d/%d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->tid, - __entry->pass, - (void *)__entry->item, - __print_symbolic(__entry->type, XFS_LI_TYPE_DESC), - __entry->count, - __entry->total) -) - -#define DEFINE_LOG_RECOVER_ITEM(name) \ -DEFINE_EVENT(xfs_log_recover_item_class, name, \ - TP_PROTO(struct log *log, struct xlog_recover *trans, \ - struct xlog_recover_item *item, int pass), \ - TP_ARGS(log, trans, item, pass)) - -DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add); -DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont); -DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head); -DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail); -DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); - -DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, - TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), - TP_ARGS(log, buf_f), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(__int64_t, blkno) - __field(unsigned short, len) - __field(unsigned short, flags) - __field(unsigned short, size) - __field(unsigned int, map_size) - ), - TP_fast_assign( - __entry->dev = 
log->l_mp->m_super->s_dev; - __entry->blkno = buf_f->blf_blkno; - __entry->len = buf_f->blf_len; - __entry->flags = buf_f->blf_flags; - __entry->size = buf_f->blf_size; - __entry->map_size = buf_f->blf_map_size; - ), - TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, " - "map_size %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->blkno, - __entry->len, - __entry->flags, - __entry->size, - __entry->map_size) -) - -#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ -DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ - TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \ - TP_ARGS(log, buf_f)) - -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf); -DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); - -DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, - TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), - TP_ARGS(log, in_f), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(unsigned short, size) - __field(int, fields) - __field(unsigned short, asize) - __field(unsigned short, dsize) - __field(__int64_t, blkno) - __field(int, len) - __field(int, boffset) - ), - TP_fast_assign( - __entry->dev = log->l_mp->m_super->s_dev; - __entry->ino = in_f->ilf_ino; - __entry->size = in_f->ilf_size; - __entry->fields = in_f->ilf_fields; - __entry->asize = in_f->ilf_asize; - __entry->dsize = in_f->ilf_dsize; - __entry->blkno = in_f->ilf_blkno; - __entry->len = in_f->ilf_len; - __entry->boffset = in_f->ilf_boffset; - ), - TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, " - "dsize %d, blkno 0x%llx, len %d, boffset %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->size, - __entry->fields, - __entry->asize, - __entry->dsize, - __entry->blkno, - __entry->len, - __entry->boffset) -) -#define DEFINE_LOG_RECOVER_INO_ITEM(name) \ -DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ - TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \ - TP_ARGS(log, in_f)) - -DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); -DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel); -DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip); - -DECLARE_EVENT_CLASS(xfs_discard_class, - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t agbno, xfs_extlen_t len), - TP_ARGS(mp, agno, agbno, len), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) - __field(xfs_extlen_t, len) - ), - TP_fast_assign( - __entry->dev = mp->m_super->s_dev; - __entry->agno = agno; - __entry->agbno = agbno; - __entry->len = len; - ), - TP_printk("dev %d:%d agno %u agbno %u len %u\n", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->agno, - __entry->agbno, - __entry->len) -) - -#define DEFINE_DISCARD_EVENT(name) \ -DEFINE_EVENT(xfs_discard_class, name, \ - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ - xfs_agblock_t agbno, xfs_extlen_t len), \ - TP_ARGS(mp, agno, agbno, len)) -DEFINE_DISCARD_EVENT(xfs_discard_extent); -DEFINE_DISCARD_EVENT(xfs_discard_toosmall); -DEFINE_DISCARD_EVENT(xfs_discard_exclude); -DEFINE_DISCARD_EVENT(xfs_discard_busy); - -#endif /* _TRACE_XFS_H 
*/
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE xfs_trace
-#include <trace/define_trace.h>
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
deleted file mode 100644
index 7c220b4..0000000
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_VNODE_H__
-#define __XFS_VNODE_H__
-
-#include "xfs_fs.h"
-
-struct file;
-struct xfs_inode;
-struct xfs_iomap;
-struct attrlist_cursor_kern;
-
-/*
- * Return values for xfs_inactive. A return value of
- * VN_INACTIVE_NOCACHE implies that the file system behavior
- * has disassociated its state and bhv_desc_t from the vnode.
- */
-#define VN_INACTIVE_CACHE 0
-#define VN_INACTIVE_NOCACHE 1
-
-/*
- * Flags for read/write calls - same values as IRIX
- */
-#define IO_ISDIRECT 0x00004 /* bypass page cache */
-#define IO_INVIS 0x00020 /* don't update inode timestamps */
-
-#define XFS_IO_FLAGS \
- { IO_ISDIRECT, "DIRECT" }, \
- { IO_INVIS, "INVIS"}
-
-/*
- * Flush/Invalidate options for vop_toss/flush/flushinval_pages.
- */
-#define FI_NONE 0 /* none */
-#define FI_REMAPF 1 /* Do a remapf prior to the operation */
-#define FI_REMAPF_LOCKED 2 /* Do a remapf prior to the operation.
- Prevent VM access to the pages until
- the operation completes. */
-
-/*
- * Some useful predicates.
- */
-#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping)
-#define VN_CACHED(vp) (vp->i_mapping->nrpages)
-#define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \
- PAGECACHE_TAG_DIRTY)
-
-
-#endif /* __XFS_VNODE_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c
deleted file mode 100644
index 87d3e03..0000000
--- a/fs/xfs/linux-2.6/xfs_xattr.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * Copyright (C) 2008 Christoph Hellwig.
- * Portions Copyright (C) 2000-2008 Silicon Graphics, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "xfs.h" -#include "xfs_da_btree.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_attr.h" -#include "xfs_attr_leaf.h" -#include "xfs_acl.h" -#include "xfs_vnodeops.h" - -#include -#include - - -static int -xfs_xattr_get(struct dentry *dentry, const char *name, - void *value, size_t size, int xflags) -{ - struct xfs_inode *ip = XFS_I(dentry->d_inode); - int error, asize = size; - - if (strcmp(name, "") == 0) - return -EINVAL; - - /* Convert Linux syscall to XFS internal ATTR flags */ - if (!size) { - xflags |= ATTR_KERNOVAL; - value = NULL; - } - - error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); - if (error) - return error; - return asize; -} - -static int -xfs_xattr_set(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags, int xflags) -{ - struct xfs_inode *ip = XFS_I(dentry->d_inode); - - if (strcmp(name, "") == 0) - return -EINVAL; - - /* Convert Linux syscall to XFS internal ATTR flags */ - if (flags & XATTR_CREATE) - xflags |= ATTR_CREATE; - if (flags & XATTR_REPLACE) - xflags |= ATTR_REPLACE; - - if (!value) - return -xfs_attr_remove(ip, (unsigned char *)name, xflags); - return -xfs_attr_set(ip, (unsigned char *)name, - (void *)value, size, xflags); -} - -static const struct xattr_handler xfs_xattr_user_handler = { - .prefix = XATTR_USER_PREFIX, - .flags = 0, /* no flags implies user namespace */ - .get = xfs_xattr_get, - .set = xfs_xattr_set, -}; - -static const struct xattr_handler xfs_xattr_trusted_handler = { - .prefix = XATTR_TRUSTED_PREFIX, - .flags = ATTR_ROOT, - .get = xfs_xattr_get, - .set = xfs_xattr_set, -}; - -static const struct xattr_handler xfs_xattr_security_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .flags = ATTR_SECURE, - .get = xfs_xattr_get, - .set = xfs_xattr_set, -}; - -const struct xattr_handler *xfs_xattr_handlers[] = { - &xfs_xattr_user_handler, - &xfs_xattr_trusted_handler, - &xfs_xattr_security_handler, -#ifdef CONFIG_XFS_POSIX_ACL - &xfs_xattr_acl_access_handler, - &xfs_xattr_acl_default_handler, -#endif - NULL -}; - -static unsigned int xfs_xattr_prefix_len(int flags) -{ - if (flags & XFS_ATTR_SECURE) - return sizeof("security"); - else if (flags & XFS_ATTR_ROOT) - return sizeof("trusted"); - else - return sizeof("user"); -} - -static const char *xfs_xattr_prefix(int flags) -{ - if (flags & XFS_ATTR_SECURE) - return xfs_xattr_security_handler.prefix; - else if (flags & XFS_ATTR_ROOT) - return xfs_xattr_trusted_handler.prefix; - else - return xfs_xattr_user_handler.prefix; -} - -static int -xfs_xattr_put_listent( - struct xfs_attr_list_context *context, - int flags, - unsigned char *name, - int namelen, - int valuelen, - unsigned char *value) -{ - unsigned int prefix_len = xfs_xattr_prefix_len(flags); - char *offset; - int arraytop; - - ASSERT(context->count >= 0); - - /* - * Only show root namespace entries if we are actually allowed to - * see them. 
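To make the listing contract concrete: the sizes-only callback just below is exactly what a zero-size listxattr(2) probe from userspace exercises, followed by a second call that fills the buffer through the real put_listent callback. A minimal userspace sketch of that two-call pattern (standard listxattr(2) semantics; fetch_xattr_names is a hypothetical helper, not part of this patch):

#include <sys/xattr.h>
#include <stdlib.h>

static char *fetch_xattr_names(const char *path, ssize_t *len)
{
        ssize_t need = listxattr(path, NULL, 0);  /* sizes-only probe */
        char *buf;

        if (need <= 0)
                return NULL;
        buf = malloc(need);
        if (buf && (*len = listxattr(path, buf, need)) < 0) {
                free(buf);                /* raced or failed: give up */
                buf = NULL;
        }
        return buf;                       /* NUL-separated name list */
}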
- */ - if ((flags & XFS_ATTR_ROOT) && !capable(CAP_SYS_ADMIN)) - return 0; - - arraytop = context->count + prefix_len + namelen + 1; - if (arraytop > context->firstu) { - context->count = -1; /* insufficient space */ - return 1; - } - offset = (char *)context->alist + context->count; - strncpy(offset, xfs_xattr_prefix(flags), prefix_len); - offset += prefix_len; - strncpy(offset, (char *)name, namelen); /* real name */ - offset += namelen; - *offset = '\0'; - context->count += prefix_len + namelen + 1; - return 0; -} - -static int -xfs_xattr_put_listent_sizes( - struct xfs_attr_list_context *context, - int flags, - unsigned char *name, - int namelen, - int valuelen, - unsigned char *value) -{ - context->count += xfs_xattr_prefix_len(flags) + namelen + 1; - return 0; -} - -static int -list_one_attr(const char *name, const size_t len, void *data, - size_t size, ssize_t *result) -{ - char *p = data + *result; - - *result += len; - if (!size) - return 0; - if (*result > size) - return -ERANGE; - - strcpy(p, name); - return 0; -} - -ssize_t -xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) -{ - struct xfs_attr_list_context context; - struct attrlist_cursor_kern cursor = { 0 }; - struct inode *inode = dentry->d_inode; - int error; - - /* - * First read the regular on-disk attributes. - */ - memset(&context, 0, sizeof(context)); - context.dp = XFS_I(inode); - context.cursor = &cursor; - context.resynch = 1; - context.alist = data; - context.bufsize = size; - context.firstu = context.bufsize; - - if (size) - context.put_listent = xfs_xattr_put_listent; - else - context.put_listent = xfs_xattr_put_listent_sizes; - - xfs_attr_list_int(&context); - if (context.count < 0) - return -ERANGE; - - /* - * Then add the two synthetic ACL attributes. - */ - if (posix_acl_access_exists(inode)) { - error = list_one_attr(POSIX_ACL_XATTR_ACCESS, - strlen(POSIX_ACL_XATTR_ACCESS) + 1, - data, size, &context.count); - if (error) - return error; - } - - if (posix_acl_default_exists(inode)) { - error = list_one_attr(POSIX_ACL_XATTR_DEFAULT, - strlen(POSIX_ACL_XATTR_DEFAULT) + 1, - data, size, &context.count); - if (error) - return error; - } - - return context.count; -} diff --git a/fs/xfs/mrlock.h b/fs/xfs/mrlock.h new file mode 100644 index 0000000..ff6a198 --- /dev/null +++ b/fs/xfs/mrlock.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_SUPPORT_MRLOCK_H__ +#define __XFS_SUPPORT_MRLOCK_H__ + +#include + +typedef struct { + struct rw_semaphore mr_lock; +#ifdef DEBUG + int mr_writer; +#endif +} mrlock_t; + +#ifdef DEBUG +#define mrinit(mrp, name) \ + do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0) +#else +#define mrinit(mrp, name) \ + do { init_rwsem(&(mrp)->mr_lock); } while (0) +#endif + +#define mrlock_init(mrp, t,n,s) mrinit(mrp, n) +#define mrfree(mrp) do { } while (0) + +static inline void mraccess_nested(mrlock_t *mrp, int subclass) +{ + down_read_nested(&mrp->mr_lock, subclass); +} + +static inline void mrupdate_nested(mrlock_t *mrp, int subclass) +{ + down_write_nested(&mrp->mr_lock, subclass); +#ifdef DEBUG + mrp->mr_writer = 1; +#endif +} + +static inline int mrtryaccess(mrlock_t *mrp) +{ + return down_read_trylock(&mrp->mr_lock); +} + +static inline int mrtryupdate(mrlock_t *mrp) +{ + if (!down_write_trylock(&mrp->mr_lock)) + return 0; +#ifdef DEBUG + mrp->mr_writer = 1; +#endif + return 1; +} + +static inline void mrunlock_excl(mrlock_t *mrp) +{ +#ifdef DEBUG + mrp->mr_writer = 0; +#endif + up_write(&mrp->mr_lock); +} + +static inline void mrunlock_shared(mrlock_t *mrp) +{ + up_read(&mrp->mr_lock); +} + +static inline void mrdemote(mrlock_t *mrp) +{ +#ifdef DEBUG + mrp->mr_writer = 0; +#endif + downgrade_write(&mrp->mr_lock); +} + +#endif /* __XFS_SUPPORT_MRLOCK_H__ */ diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c deleted file mode 100644 index db62959..0000000 --- a/fs/xfs/quota/xfs_dquot.c +++ /dev/null @@ -1,1454 +0,0 @@ -/* - * Copyright (c) 2000-2003 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_itable.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_trans_space.h" -#include "xfs_trans_priv.h" -#include "xfs_qm.h" -#include "xfs_trace.h" - - -/* - LOCK ORDER - - inode lock (ilock) - dquot hash-chain lock (hashlock) - xqm dquot freelist lock (freelistlock - mount's dquot list lock (mplistlock) - user dquot lock - lock ordering among dquots is based on the uid or gid - group dquot lock - similar to udquots. Between the two dquots, the udquot - has to be locked first. - pin lock - the dquot lock must be held to take this lock. - flush lock - ditto. 
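A minimal sketch of the id-based lock ordering stated in the comment above, the same discipline xfs_dqlock2() applies later in this file; struct dq and dq_lock_pair are simplified stand-ins, not kernel code:

#include <pthread.h>

struct dq {
        unsigned int    id;
        pthread_mutex_t lock;
};

static void dq_lock_pair(struct dq *a, struct dq *b)
{
        if (a->id > b->id) {      /* always take the lower id first */
                struct dq *t = a;
                a = b;
                b = t;
        }
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
}

Because every thread acquires the pair in the same (id) order, two threads locking the same two dquots can never hold one each and wait on the other.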
-*/ - -#ifdef DEBUG -xfs_buftarg_t *xfs_dqerror_target; -int xfs_do_dqerror; -int xfs_dqreq_num; -int xfs_dqerror_mod = 33; -#endif - -static struct lock_class_key xfs_dquot_other_class; - -/* - * Allocate and initialize a dquot. We don't always allocate fresh memory; - * we try to reclaim a free dquot if the number of incore dquots are above - * a threshold. - * The only field inside the core that gets initialized at this point - * is the d_id field. The idea is to fill in the entire q_core - * when we read in the on disk dquot. - */ -STATIC xfs_dquot_t * -xfs_qm_dqinit( - xfs_mount_t *mp, - xfs_dqid_t id, - uint type) -{ - xfs_dquot_t *dqp; - boolean_t brandnewdquot; - - brandnewdquot = xfs_qm_dqalloc_incore(&dqp); - dqp->dq_flags = type; - dqp->q_core.d_id = cpu_to_be32(id); - dqp->q_mount = mp; - - /* - * No need to re-initialize these if this is a reclaimed dquot. - */ - if (brandnewdquot) { - INIT_LIST_HEAD(&dqp->q_freelist); - mutex_init(&dqp->q_qlock); - init_waitqueue_head(&dqp->q_pinwait); - - /* - * Because we want to use a counting completion, complete - * the flush completion once to allow a single access to - * the flush completion without blocking. - */ - init_completion(&dqp->q_flush); - complete(&dqp->q_flush); - - trace_xfs_dqinit(dqp); - } else { - /* - * Only the q_core portion was zeroed in dqreclaim_one(). - * So, we need to reset others. - */ - dqp->q_nrefs = 0; - dqp->q_blkno = 0; - INIT_LIST_HEAD(&dqp->q_mplist); - INIT_LIST_HEAD(&dqp->q_hashlist); - dqp->q_bufoffset = 0; - dqp->q_fileoffset = 0; - dqp->q_transp = NULL; - dqp->q_gdquot = NULL; - dqp->q_res_bcount = 0; - dqp->q_res_icount = 0; - dqp->q_res_rtbcount = 0; - atomic_set(&dqp->q_pincount, 0); - dqp->q_hash = NULL; - ASSERT(list_empty(&dqp->q_freelist)); - - trace_xfs_dqreuse(dqp); - } - - /* - * In either case we need to make sure group quotas have a different - * lock class than user quotas, to make sure lockdep knows we can - * locks of one of each at the same time. - */ - if (!(type & XFS_DQ_USER)) - lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); - - /* - * log item gets initialized later - */ - return (dqp); -} - -/* - * This is called to free all the memory associated with a dquot - */ -void -xfs_qm_dqdestroy( - xfs_dquot_t *dqp) -{ - ASSERT(list_empty(&dqp->q_freelist)); - - mutex_destroy(&dqp->q_qlock); - kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); - - atomic_dec(&xfs_Gqm->qm_totaldquots); -} - -/* - * This is what a 'fresh' dquot inside a dquot chunk looks like on disk. - */ -STATIC void -xfs_qm_dqinit_core( - xfs_dqid_t id, - uint type, - xfs_dqblk_t *d) -{ - /* - * Caller has zero'd the entire dquot 'chunk' already. - */ - d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); - d->dd_diskdq.d_version = XFS_DQUOT_VERSION; - d->dd_diskdq.d_id = cpu_to_be32(id); - d->dd_diskdq.d_flags = type; -} - -/* - * If default limits are in force, push them into the dquot now. - * We overwrite the dquot limits only if they are zero and this - * is not the root dquot. 
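The rule just stated, that a stored limit of zero means "unset" and therefore inherits the filesystem default, reduces to a one-line helper; a sketch with plain integers standing in for the on-disk fields:

/* zero means "no limit configured", so fall back to the default */
static unsigned long long
effective_limit(unsigned long long stored, unsigned long long fs_default)
{
        return stored ? stored : fs_default;
}

With this, effective_limit(0, 1000) yields 1000, while an explicitly configured limit of 500 is left alone.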
- */ -void -xfs_qm_adjust_dqlimits( - xfs_mount_t *mp, - xfs_disk_dquot_t *d) -{ - xfs_quotainfo_t *q = mp->m_quotainfo; - - ASSERT(d->d_id); - - if (q->qi_bsoftlimit && !d->d_blk_softlimit) - d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit); - if (q->qi_bhardlimit && !d->d_blk_hardlimit) - d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit); - if (q->qi_isoftlimit && !d->d_ino_softlimit) - d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit); - if (q->qi_ihardlimit && !d->d_ino_hardlimit) - d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit); - if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit) - d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit); - if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit) - d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit); -} - -/* - * Check the limits and timers of a dquot and start or reset timers - * if necessary. - * This gets called even when quota enforcement is OFF, which makes our - * life a little less complicated. (We just don't reject any quota - * reservations in that case, when enforcement is off). - * We also return 0 as the values of the timers in Q_GETQUOTA calls, when - * enforcement's off. - * In contrast, warnings are a little different in that they don't - * 'automatically' get started when limits get exceeded. They do - * get reset to zero, however, when we find the count to be under - * the soft limit (they are only ever set non-zero via userspace). - */ -void -xfs_qm_adjust_dqtimers( - xfs_mount_t *mp, - xfs_disk_dquot_t *d) -{ - ASSERT(d->d_id); - -#ifdef DEBUG - if (d->d_blk_hardlimit) - ASSERT(be64_to_cpu(d->d_blk_softlimit) <= - be64_to_cpu(d->d_blk_hardlimit)); - if (d->d_ino_hardlimit) - ASSERT(be64_to_cpu(d->d_ino_softlimit) <= - be64_to_cpu(d->d_ino_hardlimit)); - if (d->d_rtb_hardlimit) - ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= - be64_to_cpu(d->d_rtb_hardlimit)); -#endif - - if (!d->d_btimer) { - if ((d->d_blk_softlimit && - (be64_to_cpu(d->d_bcount) >= - be64_to_cpu(d->d_blk_softlimit))) || - (d->d_blk_hardlimit && - (be64_to_cpu(d->d_bcount) >= - be64_to_cpu(d->d_blk_hardlimit)))) { - d->d_btimer = cpu_to_be32(get_seconds() + - mp->m_quotainfo->qi_btimelimit); - } else { - d->d_bwarns = 0; - } - } else { - if ((!d->d_blk_softlimit || - (be64_to_cpu(d->d_bcount) < - be64_to_cpu(d->d_blk_softlimit))) && - (!d->d_blk_hardlimit || - (be64_to_cpu(d->d_bcount) < - be64_to_cpu(d->d_blk_hardlimit)))) { - d->d_btimer = 0; - } - } - - if (!d->d_itimer) { - if ((d->d_ino_softlimit && - (be64_to_cpu(d->d_icount) >= - be64_to_cpu(d->d_ino_softlimit))) || - (d->d_ino_hardlimit && - (be64_to_cpu(d->d_icount) >= - be64_to_cpu(d->d_ino_hardlimit)))) { - d->d_itimer = cpu_to_be32(get_seconds() + - mp->m_quotainfo->qi_itimelimit); - } else { - d->d_iwarns = 0; - } - } else { - if ((!d->d_ino_softlimit || - (be64_to_cpu(d->d_icount) < - be64_to_cpu(d->d_ino_softlimit))) && - (!d->d_ino_hardlimit || - (be64_to_cpu(d->d_icount) < - be64_to_cpu(d->d_ino_hardlimit)))) { - d->d_itimer = 0; - } - } - - if (!d->d_rtbtimer) { - if ((d->d_rtb_softlimit && - (be64_to_cpu(d->d_rtbcount) >= - be64_to_cpu(d->d_rtb_softlimit))) || - (d->d_rtb_hardlimit && - (be64_to_cpu(d->d_rtbcount) >= - be64_to_cpu(d->d_rtb_hardlimit)))) { - d->d_rtbtimer = cpu_to_be32(get_seconds() + - mp->m_quotainfo->qi_rtbtimelimit); - } else { - d->d_rtbwarns = 0; - } - } else { - if ((!d->d_rtb_softlimit || - (be64_to_cpu(d->d_rtbcount) < - be64_to_cpu(d->d_rtb_softlimit))) && - (!d->d_rtb_hardlimit || - (be64_to_cpu(d->d_rtbcount) < - be64_to_cpu(d->d_rtb_hardlimit)))) { - d->d_rtbtimer 
= 0; - } - } -} - -/* - * initialize a buffer full of dquots and log the whole thing - */ -STATIC void -xfs_qm_init_dquot_blk( - xfs_trans_t *tp, - xfs_mount_t *mp, - xfs_dqid_t id, - uint type, - xfs_buf_t *bp) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - xfs_dqblk_t *d; - int curid, i; - - ASSERT(tp); - ASSERT(xfs_buf_islocked(bp)); - - d = bp->b_addr; - - /* - * ID of the first dquot in the block - id's are zero based. - */ - curid = id - (id % q->qi_dqperchunk); - ASSERT(curid >= 0); - memset(d, 0, BBTOB(q->qi_dqchunklen)); - for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) - xfs_qm_dqinit_core(curid, type, d); - xfs_trans_dquot_buf(tp, bp, - (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF : - ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF : - XFS_BLF_GDQUOT_BUF))); - xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); -} - - - -/* - * Allocate a block and fill it with dquots. - * This is called when the bmapi finds a hole. - */ -STATIC int -xfs_qm_dqalloc( - xfs_trans_t **tpp, - xfs_mount_t *mp, - xfs_dquot_t *dqp, - xfs_inode_t *quotip, - xfs_fileoff_t offset_fsb, - xfs_buf_t **O_bpp) -{ - xfs_fsblock_t firstblock; - xfs_bmap_free_t flist; - xfs_bmbt_irec_t map; - int nmaps, error, committed; - xfs_buf_t *bp; - xfs_trans_t *tp = *tpp; - - ASSERT(tp != NULL); - - trace_xfs_dqalloc(dqp); - - /* - * Initialize the bmap freelist prior to calling bmapi code. - */ - xfs_bmap_init(&flist, &firstblock); - xfs_ilock(quotip, XFS_ILOCK_EXCL); - /* - * Return if this type of quotas is turned off while we didn't - * have an inode lock - */ - if (XFS_IS_THIS_QUOTA_OFF(dqp)) { - xfs_iunlock(quotip, XFS_ILOCK_EXCL); - return (ESRCH); - } - - xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL); - nmaps = 1; - if ((error = xfs_bmapi(tp, quotip, - offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, - XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, - &firstblock, - XFS_QM_DQALLOC_SPACE_RES(mp), - &map, &nmaps, &flist))) { - goto error0; - } - ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); - ASSERT(nmaps == 1); - ASSERT((map.br_startblock != DELAYSTARTBLOCK) && - (map.br_startblock != HOLESTARTBLOCK)); - - /* - * Keep track of the blkno to save a lookup later - */ - dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); - - /* now we can just get the buffer (there's nothing to read yet) */ - bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, - dqp->q_blkno, - mp->m_quotainfo->qi_dqchunklen, - 0); - if (!bp || (error = xfs_buf_geterror(bp))) - goto error1; - /* - * Make a chunk of dquots out of this buffer and log - * the entire thing. - */ - xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), - dqp->dq_flags & XFS_DQ_ALLTYPES, bp); - - /* - * xfs_bmap_finish() may commit the current transaction and - * start a second transaction if the freelist is not empty. - * - * Since we still want to modify this buffer, we need to - * ensure that the buffer is not released on commit of - * the first transaction and ensure the buffer is added to the - * second transaction. - * - * If there is only one transaction then don't stop the buffer - * from being released when it commits later on. - */ - - xfs_trans_bhold(tp, bp); - - if ((error = xfs_bmap_finish(tpp, &flist, &committed))) { - goto error1; - } - - if (committed) { - tp = *tpp; - xfs_trans_bjoin(tp, bp); - } else { - xfs_trans_bhold_release(tp, bp); - } - - *O_bpp = bp; - return 0; - - error1: - xfs_bmap_cancel(&flist); - error0: - xfs_iunlock(quotip, XFS_ILOCK_EXCL); - - return (error); -} - -/* - * Maps a dquot to the buffer containing its on-disk version. 
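The mapping that xfs_qm_dqtobp() performs below comes down to two pieces of integer arithmetic: dquots are packed a fixed number per chunk of the quota inode, so an id selects a chunk (the file offset) plus a byte offset within it. A worked example with assumed geometry (both constants are illustrative, not taken from this patch):

#include <stdio.h>

int main(void)
{
        unsigned int id = 1000;         /* example quota id */
        unsigned int dqperchunk = 30;   /* assumed dquots per chunk */
        unsigned int dqblk = 136;       /* assumed on-disk dquot size */

        unsigned int fileoff = id / dqperchunk;           /* chunk 33 */
        unsigned int bufoff = (id % dqperchunk) * dqblk;  /* byte 1360 */

        printf("chunk %u, byte offset %u\n", fileoff, bufoff);
        return 0;
}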
- * This returns a ptr to the buffer containing the on-disk dquot - * in the bpp param, and a ptr to the on-disk dquot within that buffer - */ -STATIC int -xfs_qm_dqtobp( - xfs_trans_t **tpp, - xfs_dquot_t *dqp, - xfs_disk_dquot_t **O_ddpp, - xfs_buf_t **O_bpp, - uint flags) -{ - xfs_bmbt_irec_t map; - int nmaps = 1, error; - xfs_buf_t *bp; - xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); - xfs_mount_t *mp = dqp->q_mount; - xfs_disk_dquot_t *ddq; - xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); - xfs_trans_t *tp = (tpp ? *tpp : NULL); - - dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; - - xfs_ilock(quotip, XFS_ILOCK_SHARED); - if (XFS_IS_THIS_QUOTA_OFF(dqp)) { - /* - * Return if this type of quotas is turned off while we - * didn't have the quota inode lock. - */ - xfs_iunlock(quotip, XFS_ILOCK_SHARED); - return ESRCH; - } - - /* - * Find the block map; no allocations yet - */ - error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, - XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, - NULL, 0, &map, &nmaps, NULL); - - xfs_iunlock(quotip, XFS_ILOCK_SHARED); - if (error) - return error; - - ASSERT(nmaps == 1); - ASSERT(map.br_blockcount == 1); - - /* - * Offset of dquot in the (fixed sized) dquot chunk. - */ - dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * - sizeof(xfs_dqblk_t); - - ASSERT(map.br_startblock != DELAYSTARTBLOCK); - if (map.br_startblock == HOLESTARTBLOCK) { - /* - * We don't allocate unless we're asked to - */ - if (!(flags & XFS_QMOPT_DQALLOC)) - return ENOENT; - - ASSERT(tp); - error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, - dqp->q_fileoffset, &bp); - if (error) - return error; - tp = *tpp; - } else { - trace_xfs_dqtobp_read(dqp); - - /* - * store the blkno etc so that we don't have to do the - * mapping all the time - */ - dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); - - error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, - dqp->q_blkno, - mp->m_quotainfo->qi_dqchunklen, - 0, &bp); - if (error || !bp) - return XFS_ERROR(error); - } - - ASSERT(xfs_buf_islocked(bp)); - - /* - * calculate the location of the dquot inside the buffer. - */ - ddq = bp->b_addr + dqp->q_bufoffset; - - /* - * A simple sanity check in case we got a corrupted dquot... - */ - error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, - flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), - "dqtobp"); - if (error) { - if (!(flags & XFS_QMOPT_DQREPAIR)) { - xfs_trans_brelse(tp, bp); - return XFS_ERROR(EIO); - } - } - - *O_bpp = bp; - *O_ddpp = ddq; - - return (0); -} - - -/* - * Read in the ondisk dquot using dqtobp() then copy it to an incore version, - * and release the buffer immediately. - * - */ -/* ARGSUSED */ -STATIC int -xfs_qm_dqread( - xfs_trans_t **tpp, - xfs_dqid_t id, - xfs_dquot_t *dqp, /* dquot to get filled in */ - uint flags) -{ - xfs_disk_dquot_t *ddqp; - xfs_buf_t *bp; - int error; - xfs_trans_t *tp; - - ASSERT(tpp); - - trace_xfs_dqread(dqp); - - /* - * get a pointer to the on-disk dquot and the buffer containing it - * dqp already knows its own type (GROUP/USER). - */ - if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) { - return (error); - } - tp = *tpp; - - /* copy everything from disk dquot to the incore dquot */ - memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); - ASSERT(be32_to_cpu(dqp->q_core.d_id) == id); - xfs_qm_dquot_logitem_init(dqp); - - /* - * Reservation counters are defined as reservation plus current usage - * to avoid having to add every time. 
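A sketch of why seeding the in-core reservation counters with the on-disk usage pays off: every subsequent limit check compares a single running number instead of summing usage and outstanding reservations on each call (simplified types, not the kernel's):

struct dq_res {
        unsigned long long res_bcount;  /* usage + reservations */
        unsigned long long hardlimit;   /* 0 means no limit */
};

static int dq_reserve_blocks(struct dq_res *q, unsigned long long nblks)
{
        if (q->hardlimit && q->res_bcount + nblks > q->hardlimit)
                return -1;              /* would exceed the hard limit */
        q->res_bcount += nblks;         /* one counter to maintain */
        return 0;
}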
- */ - dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount); - dqp->q_res_icount = be64_to_cpu(ddqp->d_icount); - dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount); - - /* Mark the buf so that this will stay incore a little longer */ - XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF); - - /* - * We got the buffer with a xfs_trans_read_buf() (in dqtobp()) - * So we need to release with xfs_trans_brelse(). - * The strategy here is identical to that of inodes; we lock - * the dquot in xfs_qm_dqget() before making it accessible to - * others. This is because dquots, like inodes, need a good level of - * concurrency, and we don't want to take locks on the entire buffers - * for dquot accesses. - * Note also that the dquot buffer may even be dirty at this point, if - * this particular dquot was repaired. We still aren't afraid to - * brelse it because we have the changes incore. - */ - ASSERT(xfs_buf_islocked(bp)); - xfs_trans_brelse(tp, bp); - - return (error); -} - - -/* - * allocate an incore dquot from the kernel heap, - * and fill its core with quota information kept on disk. - * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk - * if it wasn't already allocated. - */ -STATIC int -xfs_qm_idtodq( - xfs_mount_t *mp, - xfs_dqid_t id, /* gid or uid, depending on type */ - uint type, /* UDQUOT or GDQUOT */ - uint flags, /* DQALLOC, DQREPAIR */ - xfs_dquot_t **O_dqpp)/* OUT : incore dquot, not locked */ -{ - xfs_dquot_t *dqp; - int error; - xfs_trans_t *tp; - int cancelflags=0; - - dqp = xfs_qm_dqinit(mp, id, type); - tp = NULL; - if (flags & XFS_QMOPT_DQALLOC) { - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC); - error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp), - XFS_WRITE_LOG_RES(mp) + - BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + - 128, - 0, - XFS_TRANS_PERM_LOG_RES, - XFS_WRITE_LOG_COUNT); - if (error) { - cancelflags = 0; - goto error0; - } - cancelflags = XFS_TRANS_RELEASE_LOG_RES; - } - - /* - * Read it from disk; xfs_dqread() takes care of - * all the necessary initialization of dquot's fields (locks, etc) - */ - if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) { - /* - * This can happen if quotas got turned off (ESRCH), - * or if the dquot didn't exist on disk and we ask to - * allocate (ENOENT). - */ - trace_xfs_dqread_fail(dqp); - cancelflags |= XFS_TRANS_ABORT; - goto error0; - } - if (tp) { - if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) - goto error1; - } - - *O_dqpp = dqp; - return (0); - - error0: - ASSERT(error); - if (tp) - xfs_trans_cancel(tp, cancelflags); - error1: - xfs_qm_dqdestroy(dqp); - *O_dqpp = NULL; - return (error); -} - -/* - * Lookup a dquot in the incore dquot hashtable. We keep two separate - * hashtables for user and group dquots; and, these are global tables - * inside the XQM, not per-filesystem tables. - * The hash chain must be locked by caller, and it is left locked - * on return. Returning dquot is locked. - */ -STATIC int -xfs_qm_dqlookup( - xfs_mount_t *mp, - xfs_dqid_t id, - xfs_dqhash_t *qh, - xfs_dquot_t **O_dqpp) -{ - xfs_dquot_t *dqp; - uint flist_locked; - - ASSERT(mutex_is_locked(&qh->qh_lock)); - - flist_locked = B_FALSE; - - /* - * Traverse the hashchain looking for a match - */ - list_for_each_entry(dqp, &qh->qh_list, q_hashlist) { - /* - * We already have the hashlock. We don't need the - * dqlock to look at the id field of the dquot, since the - * id can't be modified without the hashlock anyway. 
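A minimal sketch of the lookup pattern described above, where a single per-chain lock both protects list membership and stabilizes the keys being compared, so no per-entry lock is needed during the scan (userspace flavored; chain_lookup and its types are illustrative):

#include <pthread.h>
#include <sys/queue.h>

struct entry {
        unsigned int            id;
        LIST_ENTRY(entry)       link;
};

struct bucket {
        pthread_mutex_t         lock;
        LIST_HEAD(, entry)      chain;
};

/* Caller holds b->lock; that same lock freezes every e->id, so the
 * comparison below is safe without touching per-entry locks. */
static struct entry *chain_lookup(struct bucket *b, unsigned int id)
{
        struct entry *e;

        LIST_FOREACH(e, &b->chain, link)
                if (e->id == id)
                        return e;
        return NULL;
}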
- */ - if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) { - trace_xfs_dqlookup_found(dqp); - - /* - * All in core dquots must be on the dqlist of mp - */ - ASSERT(!list_empty(&dqp->q_mplist)); - - xfs_dqlock(dqp); - if (dqp->q_nrefs == 0) { - ASSERT(!list_empty(&dqp->q_freelist)); - if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) { - trace_xfs_dqlookup_want(dqp); - - /* - * We may have raced with dqreclaim_one() - * (and lost). So, flag that we don't - * want the dquot to be reclaimed. - */ - dqp->dq_flags |= XFS_DQ_WANT; - xfs_dqunlock(dqp); - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - xfs_dqlock(dqp); - dqp->dq_flags &= ~(XFS_DQ_WANT); - } - flist_locked = B_TRUE; - } - - /* - * id couldn't have changed; we had the hashlock all - * along - */ - ASSERT(be32_to_cpu(dqp->q_core.d_id) == id); - - if (flist_locked) { - if (dqp->q_nrefs != 0) { - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - flist_locked = B_FALSE; - } else { - /* take it off the freelist */ - trace_xfs_dqlookup_freelist(dqp); - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; - } - } - - XFS_DQHOLD(dqp); - - if (flist_locked) - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - /* - * move the dquot to the front of the hashchain - */ - ASSERT(mutex_is_locked(&qh->qh_lock)); - list_move(&dqp->q_hashlist, &qh->qh_list); - trace_xfs_dqlookup_done(dqp); - *O_dqpp = dqp; - return 0; - } - } - - *O_dqpp = NULL; - ASSERT(mutex_is_locked(&qh->qh_lock)); - return (1); -} - -/* - * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a - * a locked dquot, doing an allocation (if requested) as needed. - * When both an inode and an id are given, the inode's id takes precedence. - * That is, if the id changes while we don't hold the ilock inside this - * function, the new dquot is returned, not necessarily the one requested - * in the id argument. - */ -int -xfs_qm_dqget( - xfs_mount_t *mp, - xfs_inode_t *ip, /* locked inode (optional) */ - xfs_dqid_t id, /* uid/projid/gid depending on type */ - uint type, /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */ - uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ - xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ -{ - xfs_dquot_t *dqp; - xfs_dqhash_t *h; - uint version; - int error; - - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || - (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) || - (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { - return (ESRCH); - } - h = XFS_DQ_HASH(mp, id, type); - -#ifdef DEBUG - if (xfs_do_dqerror) { - if ((xfs_dqerror_target == mp->m_ddev_targp) && - (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { - xfs_debug(mp, "Returning error in dqget"); - return (EIO); - } - } -#endif - - again: - -#ifdef DEBUG - ASSERT(type == XFS_DQ_USER || - type == XFS_DQ_PROJ || - type == XFS_DQ_GROUP); - if (ip) { - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - if (type == XFS_DQ_USER) - ASSERT(ip->i_udquot == NULL); - else - ASSERT(ip->i_gdquot == NULL); - } -#endif - mutex_lock(&h->qh_lock); - - /* - * Look in the cache (hashtable). - * The chain is kept locked during lookup. - */ - if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) { - XQM_STATS_INC(xqmstats.xs_qm_dqcachehits); - /* - * The dquot was found, moved to the front of the chain, - * taken off the freelist if it was on it, and locked - * at this point. Just unlock the hashchain and return. 
- */ - ASSERT(*O_dqpp); - ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp)); - mutex_unlock(&h->qh_lock); - trace_xfs_dqget_hit(*O_dqpp); - return (0); /* success */ - } - XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses); - - /* - * Dquot cache miss. We don't want to keep the inode lock across - * a (potential) disk read. Also we don't want to deal with the lock - * ordering between quotainode and this inode. OTOH, dropping the inode - * lock here means dealing with a chown that can happen before - * we re-acquire the lock. - */ - if (ip) - xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* - * Save the hashchain version stamp, and unlock the chain, so that - * we don't keep the lock across a disk read - */ - version = h->qh_version; - mutex_unlock(&h->qh_lock); - - /* - * Allocate the dquot on the kernel heap, and read the ondisk - * portion off the disk. Also, do all the necessary initialization - * This can return ENOENT if dquot didn't exist on disk and we didn't - * ask it to allocate; ESRCH if quotas got turned off suddenly. - */ - if ((error = xfs_qm_idtodq(mp, id, type, - flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR| - XFS_QMOPT_DOWARN), - &dqp))) { - if (ip) - xfs_ilock(ip, XFS_ILOCK_EXCL); - return (error); - } - - /* - * See if this is mount code calling to look at the overall quota limits - * which are stored in the id == 0 user or group's dquot. - * Since we may not have done a quotacheck by this point, just return - * the dquot without attaching it to any hashtables, lists, etc, or even - * taking a reference. - * The caller must dqdestroy this once done. - */ - if (flags & XFS_QMOPT_DQSUSER) { - ASSERT(id == 0); - ASSERT(! ip); - goto dqret; - } - - /* - * Dquot lock comes after hashlock in the lock ordering - */ - if (ip) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - - /* - * A dquot could be attached to this inode by now, since - * we had dropped the ilock. - */ - if (type == XFS_DQ_USER) { - if (!XFS_IS_UQUOTA_ON(mp)) { - /* inode stays locked on return */ - xfs_qm_dqdestroy(dqp); - return XFS_ERROR(ESRCH); - } - if (ip->i_udquot) { - xfs_qm_dqdestroy(dqp); - dqp = ip->i_udquot; - xfs_dqlock(dqp); - goto dqret; - } - } else { - if (!XFS_IS_OQUOTA_ON(mp)) { - /* inode stays locked on return */ - xfs_qm_dqdestroy(dqp); - return XFS_ERROR(ESRCH); - } - if (ip->i_gdquot) { - xfs_qm_dqdestroy(dqp); - dqp = ip->i_gdquot; - xfs_dqlock(dqp); - goto dqret; - } - } - } - - /* - * Hashlock comes after ilock in lock order - */ - mutex_lock(&h->qh_lock); - if (version != h->qh_version) { - xfs_dquot_t *tmpdqp; - /* - * Now, see if somebody else put the dquot in the - * hashtable before us. This can happen because we didn't - * keep the hashchain lock. We don't have to worry about - * lock order between the two dquots here since dqp isn't - * on any findable lists yet. - */ - if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) { - /* - * Duplicate found. Just throw away the new dquot - * and start over. - */ - xfs_qm_dqput(tmpdqp); - mutex_unlock(&h->qh_lock); - xfs_qm_dqdestroy(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dquot_dups); - goto again; - } - } - - /* - * Put the dquot at the beginning of the hash-chain and mp's list - * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock .. 
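A distilled sketch of the version-stamp revalidation used above: drop the chain lock around the slow disk read, then detect concurrent insertions by comparing a per-chain generation counter before adding the new entry (all helper functions are hypothetical stand-ins, not kernel APIs):

#include <pthread.h>

struct chain {
        pthread_mutex_t lock;
        unsigned int    version;        /* bumped on every insertion */
};

extern void slow_disk_read(void);               /* hypothetical */
extern int  search_chain_again(struct chain *c);/* hypothetical */
extern void insert_entry(struct chain *c);      /* hypothetical */

static void add_revalidated(struct chain *c)
{
        unsigned int v;

        pthread_mutex_lock(&c->lock);
        v = c->version;                 /* remember the generation */
        pthread_mutex_unlock(&c->lock);

        slow_disk_read();               /* no chain lock across I/O */

        pthread_mutex_lock(&c->lock);
        if (v != c->version && search_chain_again(c)) {
                pthread_mutex_unlock(&c->lock);
                return;                 /* a racer inserted it: discard ours */
        }
        insert_entry(c);                /* still unique: insert and bump */
        c->version++;
        pthread_mutex_unlock(&c->lock);
}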
- */ - ASSERT(mutex_is_locked(&h->qh_lock)); - dqp->q_hash = h; - list_add(&dqp->q_hashlist, &h->qh_list); - h->qh_version++; - - /* - * Attach this dquot to this filesystem's list of all dquots, - * kept inside the mount structure in m_quotainfo field - */ - mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); - - /* - * We return a locked dquot to the caller, with a reference taken - */ - xfs_dqlock(dqp); - dqp->q_nrefs = 1; - - list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist); - mp->m_quotainfo->qi_dquots++; - mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); - mutex_unlock(&h->qh_lock); - dqret: - ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); - trace_xfs_dqget_miss(dqp); - *O_dqpp = dqp; - return (0); -} - - -/* - * Release a reference to the dquot (decrement ref-count) - * and unlock it. If there is a group quota attached to this - * dquot, carefully release that too without tripping over - * deadlocks'n'stuff. - */ -void -xfs_qm_dqput( - xfs_dquot_t *dqp) -{ - xfs_dquot_t *gdqp; - - ASSERT(dqp->q_nrefs > 0); - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - - trace_xfs_dqput(dqp); - - if (dqp->q_nrefs != 1) { - dqp->q_nrefs--; - xfs_dqunlock(dqp); - return; - } - - /* - * drop the dqlock and acquire the freelist and dqlock - * in the right order; but try to get it out-of-order first - */ - if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) { - trace_xfs_dqput_wait(dqp); - xfs_dqunlock(dqp); - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - xfs_dqlock(dqp); - } - - while (1) { - gdqp = NULL; - - /* We can't depend on nrefs being == 1 here */ - if (--dqp->q_nrefs == 0) { - trace_xfs_dqput_free(dqp); - - list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); - xfs_Gqm->qm_dqfrlist_cnt++; - - /* - * If we just added a udquot to the freelist, then - * we want to release the gdquot reference that - * it (probably) has. Otherwise it'll keep the - * gdquot from getting reclaimed. - */ - if ((gdqp = dqp->q_gdquot)) { - /* - * Avoid a recursive dqput call - */ - xfs_dqlock(gdqp); - dqp->q_gdquot = NULL; - } - } - xfs_dqunlock(dqp); - - /* - * If we had a group quota inside the user quota as a hint, - * release it now. - */ - if (! gdqp) - break; - dqp = gdqp; - } - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); -} - -/* - * Release a dquot. Flush it if dirty, then dqput() it. - * dquot must not be locked. - */ -void -xfs_qm_dqrele( - xfs_dquot_t *dqp) -{ - if (!dqp) - return; - - trace_xfs_dqrele(dqp); - - xfs_dqlock(dqp); - /* - * We don't care to flush it if the dquot is dirty here. - * That will create stutters that we want to avoid. - * Instead we do a delayed write when we try to reclaim - * a dirty dquot. Also xfs_sync will take part of the burden... - */ - xfs_qm_dqput(dqp); -} - -/* - * This is the dquot flushing I/O completion routine. It is called - * from interrupt level when the buffer containing the dquot is - * flushed to disk. It is responsible for removing the dquot logitem - * from the AIL if it has not been re-logged, and unlocking the dquot's - * flush lock. This behavior is very similar to that of inodes.. - */ -STATIC void -xfs_qm_dqflush_done( - struct xfs_buf *bp, - struct xfs_log_item *lip) -{ - xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip; - xfs_dquot_t *dqp = qip->qli_dquot; - struct xfs_ail *ailp = lip->li_ailp; - - /* - * We only want to pull the item from the AIL if its - * location in the log has not changed since we started the flush. - * Thus, we only bother if the dquot's lsn has - * not changed. 
First we check the lsn outside the lock - * since it's cheaper, and then we recheck while - * holding the lock before removing the dquot from the AIL. - */ - if ((lip->li_flags & XFS_LI_IN_AIL) && - lip->li_lsn == qip->qli_flush_lsn) { - - /* xfs_trans_ail_delete() drops the AIL lock. */ - spin_lock(&ailp->xa_lock); - if (lip->li_lsn == qip->qli_flush_lsn) - xfs_trans_ail_delete(ailp, lip); - else - spin_unlock(&ailp->xa_lock); - } - - /* - * Release the dq's flush lock since we're done with it. - */ - xfs_dqfunlock(dqp); -} - -/* - * Write a modified dquot to disk. - * The dquot must be locked and the flush lock too taken by caller. - * The flush lock will not be unlocked until the dquot reaches the disk, - * but the dquot is free to be unlocked and modified by the caller - * in the interim. Dquot is still locked on return. This behavior is - * identical to that of inodes. - */ -int -xfs_qm_dqflush( - xfs_dquot_t *dqp, - uint flags) -{ - struct xfs_mount *mp = dqp->q_mount; - struct xfs_buf *bp; - struct xfs_disk_dquot *ddqp; - int error; - - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - ASSERT(!completion_done(&dqp->q_flush)); - - trace_xfs_dqflush(dqp); - - /* - * If not dirty, or it's pinned and we are not supposed to block, nada. - */ - if (!XFS_DQ_IS_DIRTY(dqp) || - (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { - xfs_dqfunlock(dqp); - return 0; - } - xfs_qm_dqunpin_wait(dqp); - - /* - * This may have been unpinned because the filesystem is shutting - * down forcibly. If that's the case we must not write this dquot - * to disk, because the log record didn't make it to disk! - */ - if (XFS_FORCED_SHUTDOWN(mp)) { - dqp->dq_flags &= ~XFS_DQ_DIRTY; - xfs_dqfunlock(dqp); - return XFS_ERROR(EIO); - } - - /* - * Get the buffer containing the on-disk dquot - */ - error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, - mp->m_quotainfo->qi_dqchunklen, 0, &bp); - if (error) { - ASSERT(error != ENOENT); - xfs_dqfunlock(dqp); - return error; - } - - /* - * Calculate the location of the dquot inside the buffer. - */ - ddqp = bp->b_addr + dqp->q_bufoffset; - - /* - * A simple sanity check in case we got a corrupted dquot.. - */ - error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, - XFS_QMOPT_DOWARN, "dqflush (incore copy)"); - if (error) { - xfs_buf_relse(bp); - xfs_dqfunlock(dqp); - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - return XFS_ERROR(EIO); - } - - /* This is the only portion of data that needs to persist */ - memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t)); - - /* - * Clear the dirty field and remember the flush lsn for later use. - */ - dqp->dq_flags &= ~XFS_DQ_DIRTY; - - xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, - &dqp->q_logitem.qli_item.li_lsn); - - /* - * Attach an iodone routine so that we can remove this dquot from the - * AIL and release the flush lock once the dquot is synced to disk. - */ - xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, - &dqp->q_logitem.qli_item); - - /* - * If the buffer is pinned then push on the log so we won't - * get stuck waiting in the write for too long. - */ - if (xfs_buf_ispinned(bp)) { - trace_xfs_dqflush_force(dqp); - xfs_log_force(mp, 0); - } - - if (flags & SYNC_WAIT) - error = xfs_bwrite(mp, bp); - else - xfs_bdwrite(mp, bp); - - trace_xfs_dqflush_done(dqp); - - /* - * dqp is still locked, but caller is free to unlock it now. 
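A distilled sketch of the cheap-check-then-recheck pattern xfs_qm_dqflush_done() uses above: test the LSN without the AIL lock first, and only take the lock (and test again) when removal looks necessary. Types are simplified, and the real code additionally lets xfs_trans_ail_delete() drop the AIL lock:

#include <pthread.h>

struct log_item { long long lsn; };
struct ail { pthread_mutex_t lock; };

/* flush_lsn is the lsn captured when the flush was issued */
static void maybe_remove(struct ail *a, struct log_item *li,
                         long long flush_lsn)
{
        if (li->lsn != flush_lsn)       /* unlocked fast path: relogged */
                return;

        pthread_mutex_lock(&a->lock);
        if (li->lsn == flush_lsn) {
                /* still where the flush left it: safe to unlink here */
        }
        pthread_mutex_unlock(&a->lock);
}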
- */
- return error;
-
-}
-
-int
-xfs_qm_dqlock_nowait(
- xfs_dquot_t *dqp)
-{
- return mutex_trylock(&dqp->q_qlock);
-}
-
-void
-xfs_dqlock(
- xfs_dquot_t *dqp)
-{
- mutex_lock(&dqp->q_qlock);
-}
-
-void
-xfs_dqunlock(
- xfs_dquot_t *dqp)
-{
- mutex_unlock(&(dqp->q_qlock));
- if (dqp->q_logitem.qli_dquot == dqp) {
- /* Once was dqp->q_mount, but might just have been cleared */
- xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
- (xfs_log_item_t*)&(dqp->q_logitem));
- }
-}
-
-
-void
-xfs_dqunlock_nonotify(
- xfs_dquot_t *dqp)
-{
- mutex_unlock(&(dqp->q_qlock));
-}
-
-/*
- * Lock two xfs_dquot structures.
- *
- * To avoid deadlocks we always lock the quota structure with
- * the lower id first.
- */
-void
-xfs_dqlock2(
- xfs_dquot_t *d1,
- xfs_dquot_t *d2)
-{
- if (d1 && d2) {
- ASSERT(d1 != d2);
- if (be32_to_cpu(d1->q_core.d_id) >
- be32_to_cpu(d2->q_core.d_id)) {
- mutex_lock(&d2->q_qlock);
- mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
- } else {
- mutex_lock(&d1->q_qlock);
- mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
- }
- } else if (d1) {
- mutex_lock(&d1->q_qlock);
- } else if (d2) {
- mutex_lock(&d2->q_qlock);
- }
-}
-
-
-/*
- * Take a dquot out of the mount's dqlist as well as the hashlist.
- * This is called via unmount as well as quotaoff, and the purge
- * will always succeed unless there are soft (temp) references
- * outstanding.
- *
- * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
- * that we're returning! XXXsup - not cool.
- */
-/* ARGSUSED */
-int
-xfs_qm_dqpurge(
- xfs_dquot_t *dqp)
-{
- xfs_dqhash_t *qh = dqp->q_hash;
- xfs_mount_t *mp = dqp->q_mount;
-
- ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
- ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
-
- xfs_dqlock(dqp);
- /*
- * We really can't afford to purge a dquot that is
- * referenced, because these are hard refs.
- * It shouldn't happen in general because we went thru _all_ inodes in
- * dqrele_all_inodes before calling this and didn't let the mountlock go.
- * However it is possible that we have dquots with temporary
- * references that are not attached to an inode. e.g. see xfs_setattr().
- */
- if (dqp->q_nrefs != 0) {
- xfs_dqunlock(dqp);
- mutex_unlock(&dqp->q_hash->qh_lock);
- return (1);
- }
-
- ASSERT(!list_empty(&dqp->q_freelist));
-
- /*
- * If we're turning off quotas, we have to make sure that, for
- * example, we don't delete quota disk blocks while dquots are
- * in the process of getting written to those disk blocks.
- * This dquot might well be on AIL, and we can't leave it there
- * if we're turning off quotas. Basically, we need this flush
- * lock, and are willing to block on it.
- */
- if (!xfs_dqflock_nowait(dqp)) {
- /*
- * Block on the flush lock after nudging dquot buffer,
- * if it is incore.
- */
- xfs_qm_dqflock_pushbuf_wait(dqp);
- }
-
- /*
- * XXXIf we're turning this type of quotas off, we don't care
- * about the dirty metadata sitting in this dquot. OTOH, if
- * we're unmounting, we do care, so we flush it and wait.
- */
- if (XFS_DQ_IS_DIRTY(dqp)) {
- int error;
-
- /* dqflush unlocks dqflock */
- /*
- * Given that dqpurge is a very rare occurrence, it is OK
- * that we're holding the hashlist and mplist locks
- * across the disk write. But, ... XXXsup
- *
- * We don't care about getting disk errors here. We need
- * to purge this dquot anyway, so we go ahead regardless.
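- * (A flush failure is only logged below; the purge still completes so
- * that quotaoff and unmount can make progress.)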
- */ - error = xfs_qm_dqflush(dqp, SYNC_WAIT); - if (error) - xfs_warn(mp, "%s: dquot %p flush failed", - __func__, dqp); - xfs_dqflock(dqp); - } - ASSERT(atomic_read(&dqp->q_pincount) == 0); - ASSERT(XFS_FORCED_SHUTDOWN(mp) || - !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); - - list_del_init(&dqp->q_hashlist); - qh->qh_version++; - list_del_init(&dqp->q_mplist); - mp->m_quotainfo->qi_dqreclaims++; - mp->m_quotainfo->qi_dquots--; - /* - * XXX Move this to the front of the freelist, if we can get the - * freelist lock. - */ - ASSERT(!list_empty(&dqp->q_freelist)); - - dqp->q_mount = NULL; - dqp->q_hash = NULL; - dqp->dq_flags = XFS_DQ_INACTIVE; - memset(&dqp->q_core, 0, sizeof(dqp->q_core)); - xfs_dqfunlock(dqp); - xfs_dqunlock(dqp); - mutex_unlock(&qh->qh_lock); - return (0); -} - - -/* - * Give the buffer a little push if it is incore and - * wait on the flush lock. - */ -void -xfs_qm_dqflock_pushbuf_wait( - xfs_dquot_t *dqp) -{ - xfs_mount_t *mp = dqp->q_mount; - xfs_buf_t *bp; - - /* - * Check to see if the dquot has been flushed delayed - * write. If so, grab its buffer and send it - * out immediately. We'll be able to acquire - * the flush lock when the I/O completes. - */ - bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, - mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); - if (!bp) - goto out_lock; - - if (XFS_BUF_ISDELAYWRITE(bp)) { - if (xfs_buf_ispinned(bp)) - xfs_log_force(mp, 0); - xfs_buf_delwri_promote(bp); - wake_up_process(bp->b_target->bt_task); - } - xfs_buf_relse(bp); -out_lock: - xfs_dqflock(dqp); -} diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h deleted file mode 100644 index 34b7e94..0000000 --- a/fs/xfs/quota/xfs_dquot.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DQUOT_H__ -#define __XFS_DQUOT_H__ - -/* - * Dquots are structures that hold quota information about a user or a group, - * much like inodes are for files. In fact, dquots share many characteristics - * with inodes. However, dquots can also be a centralized resource, relative - * to a collection of inodes. In this respect, dquots share some characteristics - * of the superblock. - * XFS dquots exploit both those in its algorithms. They make every attempt - * to not be a bottleneck when quotas are on and have minimal impact, if any, - * when quotas are off. 
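- *
- * In-core dquots live on several lists at once: a global hash chain for
- * lookup, the owning mount's list of dquots, and a global freelist used
- * by the reclaim code; the structure below carries a link field for each.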
- */
-
-/*
- * The hash chain headers (hash buckets)
- */
-typedef struct xfs_dqhash {
- struct list_head qh_list;
- struct mutex qh_lock;
- uint qh_version; /* ever increasing version */
- uint qh_nelems; /* number of dquots on the list */
-} xfs_dqhash_t;
-
-struct xfs_mount;
-struct xfs_trans;
-
-/*
- * The incore dquot structure
- */
-typedef struct xfs_dquot {
- uint dq_flags; /* various flags (XFS_DQ_*) */
- struct list_head q_freelist; /* global free list of dquots */
- struct list_head q_mplist; /* mount's list of dquots */
- struct list_head q_hashlist; /* global hash list of dquots */
- xfs_dqhash_t *q_hash; /* the hashchain header */
- struct xfs_mount*q_mount; /* filesystem this relates to */
- struct xfs_trans*q_transp; /* trans this belongs to currently */
- uint q_nrefs; /* # active refs from inodes */
- xfs_daddr_t q_blkno; /* blkno of dquot buffer */
- int q_bufoffset; /* off of dq in buffer (# dquots) */
- xfs_fileoff_t q_fileoffset; /* offset in quotas file */
-
- struct xfs_dquot*q_gdquot; /* group dquot, hint only */
- xfs_disk_dquot_t q_core; /* actual usage & quotas */
- xfs_dq_logitem_t q_logitem; /* dquot log item */
- xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
- xfs_qcnt_t q_res_icount; /* total inos allocd+reserved */
- xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */
- struct mutex q_qlock; /* quota lock */
- struct completion q_flush; /* flush completion queue */
- atomic_t q_pincount; /* dquot pin count */
- wait_queue_head_t q_pinwait; /* dquot pinning wait queue */
-} xfs_dquot_t;
-
-/*
- * Lock hierarchy for q_qlock:
- * XFS_QLOCK_NORMAL is the implicit default,
- * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
- */
-enum {
- XFS_QLOCK_NORMAL = 0,
- XFS_QLOCK_NESTED,
-};
-
-#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
-
-/*
- * Manage the q_flush completion queue embedded in the dquot. This completion
- * queue synchronizes processes attempting to flush the in-core dquot back to
- * disk.
- */
-static inline void xfs_dqflock(xfs_dquot_t *dqp)
-{
- wait_for_completion(&dqp->q_flush);
-}
-
-static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
-{
- return try_wait_for_completion(&dqp->q_flush);
-}
-
-static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
-{
- complete(&dqp->q_flush);
-}
-
-#define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock)))
-#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
-#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
-#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ)
-#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP)
-#define XFS_DQ_TO_QINF(dqp) ((dqp)->q_mount->m_quotainfo)
-#define XFS_DQ_TO_QIP(dqp) (XFS_QM_ISUDQ(dqp) ? \
- XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \
- XFS_DQ_TO_QINF(dqp)->qi_gquotaip)
-
-#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ?
\ - (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ - (XFS_IS_OQUOTA_ON((d)->q_mount)))) - -extern void xfs_qm_dqdestroy(xfs_dquot_t *); -extern int xfs_qm_dqflush(xfs_dquot_t *, uint); -extern int xfs_qm_dqpurge(xfs_dquot_t *); -extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); -extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); -extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); -extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, - xfs_disk_dquot_t *); -extern void xfs_qm_adjust_dqlimits(xfs_mount_t *, - xfs_disk_dquot_t *); -extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, - xfs_dqid_t, uint, uint, xfs_dquot_t **); -extern void xfs_qm_dqput(xfs_dquot_t *); -extern void xfs_dqlock(xfs_dquot_t *); -extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); -extern void xfs_dqunlock(xfs_dquot_t *); -extern void xfs_dqunlock_nonotify(xfs_dquot_t *); - -#endif /* __XFS_DQUOT_H__ */ diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c deleted file mode 100644 index 9e0e2fa..0000000 --- a/fs/xfs/quota/xfs_dquot_item.c +++ /dev/null @@ -1,529 +0,0 @@ -/* - * Copyright (c) 2000-2003 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_itable.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_trans_priv.h" -#include "xfs_qm.h" - -static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip) -{ - return container_of(lip, struct xfs_dq_logitem, qli_item); -} - -/* - * returns the number of iovecs needed to log the given dquot item. - */ -STATIC uint -xfs_qm_dquot_logitem_size( - struct xfs_log_item *lip) -{ - /* - * we need only two iovecs, one for the format, one for the real thing - */ - return 2; -} - -/* - * fills in the vector of log iovecs for the given dquot log item. - */ -STATIC void -xfs_qm_dquot_logitem_format( - struct xfs_log_item *lip, - struct xfs_log_iovec *logvec) -{ - struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); - - logvec->i_addr = &qlip->qli_format; - logvec->i_len = sizeof(xfs_dq_logformat_t); - logvec->i_type = XLOG_REG_TYPE_QFORMAT; - logvec++; - logvec->i_addr = &qlip->qli_dquot->q_core; - logvec->i_len = sizeof(xfs_disk_dquot_t); - logvec->i_type = XLOG_REG_TYPE_DQUOT; - - ASSERT(2 == lip->li_desc->lid_size); - qlip->qli_format.qlf_size = 2; - -} - -/* - * Increment the pin count of the given dquot. 
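- * A pinned dquot has changes in the in-core log that have not yet been
- * written back to disk, so it must not be flushed until it is unpinned.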
- */ -STATIC void -xfs_qm_dquot_logitem_pin( - struct xfs_log_item *lip) -{ - struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; - - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - atomic_inc(&dqp->q_pincount); -} - -/* - * Decrement the pin count of the given dquot, and wake up - * anyone in xfs_dqwait_unpin() if the count goes to 0. The - * dquot must have been previously pinned with a call to - * xfs_qm_dquot_logitem_pin(). - */ -STATIC void -xfs_qm_dquot_logitem_unpin( - struct xfs_log_item *lip, - int remove) -{ - struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; - - ASSERT(atomic_read(&dqp->q_pincount) > 0); - if (atomic_dec_and_test(&dqp->q_pincount)) - wake_up(&dqp->q_pinwait); -} - -/* - * Given the logitem, this writes the corresponding dquot entry to disk - * asynchronously. This is called with the dquot entry securely locked; - * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot - * at the end. - */ -STATIC void -xfs_qm_dquot_logitem_push( - struct xfs_log_item *lip) -{ - struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; - int error; - - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - ASSERT(!completion_done(&dqp->q_flush)); - - /* - * Since we were able to lock the dquot's flush lock and - * we found it on the AIL, the dquot must be dirty. This - * is because the dquot is removed from the AIL while still - * holding the flush lock in xfs_dqflush_done(). Thus, if - * we found it in the AIL and were able to obtain the flush - * lock without sleeping, then there must not have been - * anyone in the process of flushing the dquot. - */ - error = xfs_qm_dqflush(dqp, 0); - if (error) - xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p", - __func__, error, dqp); - xfs_dqunlock(dqp); -} - -STATIC xfs_lsn_t -xfs_qm_dquot_logitem_committed( - struct xfs_log_item *lip, - xfs_lsn_t lsn) -{ - /* - * We always re-log the entire dquot when it becomes dirty, - * so, the latest copy _is_ the only one that matters. - */ - return lsn; -} - -/* - * This is called to wait for the given dquot to be unpinned. - * Most of these pin/unpin routines are plagiarized from inode code. - */ -void -xfs_qm_dqunpin_wait( - struct xfs_dquot *dqp) -{ - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - if (atomic_read(&dqp->q_pincount) == 0) - return; - - /* - * Give the log a push so we don't wait here too long. - */ - xfs_log_force(dqp->q_mount, 0); - wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); -} - -/* - * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that - * the dquot is locked by us, but the flush lock isn't. So, here we are - * going to see if the relevant dquot buffer is incore, waiting on DELWRI. - * If so, we want to push it out to help us take this item off the AIL as soon - * as possible. - * - * We must not be holding the AIL lock at this point. Calling incore() to - * search the buffer cache can be a time consuming thing, and AIL lock is a - * spinlock. - */ -STATIC void -xfs_qm_dquot_logitem_pushbuf( - struct xfs_log_item *lip) -{ - struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); - struct xfs_dquot *dqp = qlip->qli_dquot; - struct xfs_buf *bp; - - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - - /* - * If flushlock isn't locked anymore, chances are that the - * inode flush completed and the inode was taken off the AIL. - * So, just get out. 
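- * (completion_done() returning true means the flush lock has been
- * released again, i.e. the earlier flush already finished.)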
- */ - if (completion_done(&dqp->q_flush) || - !(lip->li_flags & XFS_LI_IN_AIL)) { - xfs_dqunlock(dqp); - return; - } - - bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, - dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); - xfs_dqunlock(dqp); - if (!bp) - return; - if (XFS_BUF_ISDELAYWRITE(bp)) - xfs_buf_delwri_promote(bp); - xfs_buf_relse(bp); -} - -/* - * This is called to attempt to lock the dquot associated with this - * dquot log item. Don't sleep on the dquot lock or the flush lock. - * If the flush lock is already held, indicating that the dquot has - * been or is in the process of being flushed, then see if we can - * find the dquot's buffer in the buffer cache without sleeping. If - * we can and it is marked delayed write, then we want to send it out. - * We delay doing so until the push routine, though, to avoid sleeping - * in any device strategy routines. - */ -STATIC uint -xfs_qm_dquot_logitem_trylock( - struct xfs_log_item *lip) -{ - struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; - - if (atomic_read(&dqp->q_pincount) > 0) - return XFS_ITEM_PINNED; - - if (!xfs_qm_dqlock_nowait(dqp)) - return XFS_ITEM_LOCKED; - - if (!xfs_dqflock_nowait(dqp)) { - /* - * dquot has already been flushed to the backing buffer, - * leave it locked, pushbuf routine will unlock it. - */ - return XFS_ITEM_PUSHBUF; - } - - ASSERT(lip->li_flags & XFS_LI_IN_AIL); - return XFS_ITEM_SUCCESS; -} - -/* - * Unlock the dquot associated with the log item. - * Clear the fields of the dquot and dquot log item that - * are specific to the current transaction. If the - * hold flags is set, do not unlock the dquot. - */ -STATIC void -xfs_qm_dquot_logitem_unlock( - struct xfs_log_item *lip) -{ - struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot; - - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - - /* - * Clear the transaction pointer in the dquot - */ - dqp->q_transp = NULL; - - /* - * dquots are never 'held' from getting unlocked at the end of - * a transaction. Their locking and unlocking is hidden inside the - * transaction layer, within trans_commit. Hence, no LI_HOLD flag - * for the logitem. - */ - xfs_dqunlock(dqp); -} - -/* - * this needs to stamp an lsn into the dquot, I think. - * rpc's that look at user dquot's would then have to - * push on the dependency recorded in the dquot - */ -STATIC void -xfs_qm_dquot_logitem_committing( - struct xfs_log_item *lip, - xfs_lsn_t lsn) -{ -} - -/* - * This is the ops vector for dquots - */ -static struct xfs_item_ops xfs_dquot_item_ops = { - .iop_size = xfs_qm_dquot_logitem_size, - .iop_format = xfs_qm_dquot_logitem_format, - .iop_pin = xfs_qm_dquot_logitem_pin, - .iop_unpin = xfs_qm_dquot_logitem_unpin, - .iop_trylock = xfs_qm_dquot_logitem_trylock, - .iop_unlock = xfs_qm_dquot_logitem_unlock, - .iop_committed = xfs_qm_dquot_logitem_committed, - .iop_push = xfs_qm_dquot_logitem_push, - .iop_pushbuf = xfs_qm_dquot_logitem_pushbuf, - .iop_committing = xfs_qm_dquot_logitem_committing -}; - -/* - * Initialize the dquot log item for a newly allocated dquot. - * The dquot isn't locked at this point, but it isn't on any of the lists - * either, so we don't care. 
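- * (A dquot that is not yet on the hash or mount lists cannot be found
- * by any other thread, so no locking is needed while wiring this up.)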
- */ -void -xfs_qm_dquot_logitem_init( - struct xfs_dquot *dqp) -{ - struct xfs_dq_logitem *lp = &dqp->q_logitem; - - xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT, - &xfs_dquot_item_ops); - lp->qli_dquot = dqp; - lp->qli_format.qlf_type = XFS_LI_DQUOT; - lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id); - lp->qli_format.qlf_blkno = dqp->q_blkno; - lp->qli_format.qlf_len = 1; - /* - * This is just the offset of this dquot within its buffer - * (which is currently 1 FSB and probably won't change). - * Hence 32 bits for this offset should be just fine. - * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t)) - * here, and recompute it at recovery time. - */ - lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset; -} - -/*------------------ QUOTAOFF LOG ITEMS -------------------*/ - -static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip) -{ - return container_of(lip, struct xfs_qoff_logitem, qql_item); -} - - -/* - * This returns the number of iovecs needed to log the given quotaoff item. - * We only need 1 iovec for an quotaoff item. It just logs the - * quotaoff_log_format structure. - */ -STATIC uint -xfs_qm_qoff_logitem_size( - struct xfs_log_item *lip) -{ - return 1; -} - -/* - * This is called to fill in the vector of log iovecs for the - * given quotaoff log item. We use only 1 iovec, and we point that - * at the quotaoff_log_format structure embedded in the quotaoff item. - * It is at this point that we assert that all of the extent - * slots in the quotaoff item have been filled. - */ -STATIC void -xfs_qm_qoff_logitem_format( - struct xfs_log_item *lip, - struct xfs_log_iovec *log_vector) -{ - struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip); - - ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF); - - log_vector->i_addr = &qflip->qql_format; - log_vector->i_len = sizeof(xfs_qoff_logitem_t); - log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF; - qflip->qql_format.qf_size = 1; -} - -/* - * Pinning has no meaning for an quotaoff item, so just return. - */ -STATIC void -xfs_qm_qoff_logitem_pin( - struct xfs_log_item *lip) -{ -} - -/* - * Since pinning has no meaning for an quotaoff item, unpinning does - * not either. - */ -STATIC void -xfs_qm_qoff_logitem_unpin( - struct xfs_log_item *lip, - int remove) -{ -} - -/* - * Quotaoff items have no locking, so just return success. - */ -STATIC uint -xfs_qm_qoff_logitem_trylock( - struct xfs_log_item *lip) -{ - return XFS_ITEM_LOCKED; -} - -/* - * Quotaoff items have no locking or pushing, so return failure - * so that the caller doesn't bother with us. - */ -STATIC void -xfs_qm_qoff_logitem_unlock( - struct xfs_log_item *lip) -{ -} - -/* - * The quotaoff-start-item is logged only once and cannot be moved in the log, - * so simply return the lsn at which it's been logged. - */ -STATIC xfs_lsn_t -xfs_qm_qoff_logitem_committed( - struct xfs_log_item *lip, - xfs_lsn_t lsn) -{ - return lsn; -} - -/* - * There isn't much you can do to push on an quotaoff item. It is simply - * stuck waiting for the log to be flushed to disk. - */ -STATIC void -xfs_qm_qoff_logitem_push( - struct xfs_log_item *lip) -{ -} - - -STATIC xfs_lsn_t -xfs_qm_qoffend_logitem_committed( - struct xfs_log_item *lip, - xfs_lsn_t lsn) -{ - struct xfs_qoff_logitem *qfe = QOFF_ITEM(lip); - struct xfs_qoff_logitem *qfs = qfe->qql_start_lip; - struct xfs_ail *ailp = qfs->qql_item.li_ailp; - - /* - * Delete the qoff-start logitem from the AIL. - * xfs_trans_ail_delete() drops the AIL lock. 
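- * The quotaoff-end item pairs with the quotaoff-start item: once the
- * end record is safely committed, the start item no longer needs to be
- * kept around for recovery.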
- */ - spin_lock(&ailp->xa_lock); - xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs); - - kmem_free(qfs); - kmem_free(qfe); - return (xfs_lsn_t)-1; -} - -/* - * XXX rcc - don't know quite what to do with this. I think we can - * just ignore it. The only time that isn't the case is if we allow - * the client to somehow see that quotas have been turned off in which - * we can't allow that to get back until the quotaoff hits the disk. - * So how would that happen? Also, do we need different routines for - * quotaoff start and quotaoff end? I suspect the answer is yes but - * to be sure, I need to look at the recovery code and see how quota off - * recovery is handled (do we roll forward or back or do something else). - * If we roll forwards or backwards, then we need two separate routines, - * one that does nothing and one that stamps in the lsn that matters - * (truly makes the quotaoff irrevocable). If we do something else, - * then maybe we don't need two. - */ -STATIC void -xfs_qm_qoff_logitem_committing( - struct xfs_log_item *lip, - xfs_lsn_t commit_lsn) -{ -} - -static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = { - .iop_size = xfs_qm_qoff_logitem_size, - .iop_format = xfs_qm_qoff_logitem_format, - .iop_pin = xfs_qm_qoff_logitem_pin, - .iop_unpin = xfs_qm_qoff_logitem_unpin, - .iop_trylock = xfs_qm_qoff_logitem_trylock, - .iop_unlock = xfs_qm_qoff_logitem_unlock, - .iop_committed = xfs_qm_qoffend_logitem_committed, - .iop_push = xfs_qm_qoff_logitem_push, - .iop_committing = xfs_qm_qoff_logitem_committing -}; - -/* - * This is the ops vector shared by all quotaoff-start log items. - */ -static struct xfs_item_ops xfs_qm_qoff_logitem_ops = { - .iop_size = xfs_qm_qoff_logitem_size, - .iop_format = xfs_qm_qoff_logitem_format, - .iop_pin = xfs_qm_qoff_logitem_pin, - .iop_unpin = xfs_qm_qoff_logitem_unpin, - .iop_trylock = xfs_qm_qoff_logitem_trylock, - .iop_unlock = xfs_qm_qoff_logitem_unlock, - .iop_committed = xfs_qm_qoff_logitem_committed, - .iop_push = xfs_qm_qoff_logitem_push, - .iop_committing = xfs_qm_qoff_logitem_committing -}; - -/* - * Allocate and initialize an quotaoff item of the correct quota type(s). - */ -struct xfs_qoff_logitem * -xfs_qm_qoff_logitem_init( - struct xfs_mount *mp, - struct xfs_qoff_logitem *start, - uint flags) -{ - struct xfs_qoff_logitem *qf; - - qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP); - - xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ? - &xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops); - qf->qql_item.li_mountp = mp; - qf->qql_format.qf_type = XFS_LI_QUOTAOFF; - qf->qql_format.qf_flags = flags; - qf->qql_start_lip = start; - return qf; -} diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/quota/xfs_dquot_item.h deleted file mode 100644 index 5acae2a..0000000 --- a/fs/xfs/quota/xfs_dquot_item.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (c) 2000-2003 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DQUOT_ITEM_H__ -#define __XFS_DQUOT_ITEM_H__ - -struct xfs_dquot; -struct xfs_trans; -struct xfs_mount; -struct xfs_qoff_logitem; - -typedef struct xfs_dq_logitem { - xfs_log_item_t qli_item; /* common portion */ - struct xfs_dquot *qli_dquot; /* dquot ptr */ - xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ - xfs_dq_logformat_t qli_format; /* logged structure */ -} xfs_dq_logitem_t; - -typedef struct xfs_qoff_logitem { - xfs_log_item_t qql_item; /* common portion */ - struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ - xfs_qoff_logformat_t qql_format; /* logged structure */ -} xfs_qoff_logitem_t; - - -extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *); -extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *, - struct xfs_qoff_logitem *, uint); -extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *, - struct xfs_qoff_logitem *, uint); -extern void xfs_trans_log_quotaoff_item(struct xfs_trans *, - struct xfs_qoff_logitem *); - -#endif /* __XFS_DQUOT_ITEM_H__ */ diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c deleted file mode 100644 index 9a0aa76..0000000 --- a/fs/xfs/quota/xfs_qm.c +++ /dev/null @@ -1,2416 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_ialloc.h" -#include "xfs_itable.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_bmap.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_trans_space.h" -#include "xfs_utils.h" -#include "xfs_qm.h" -#include "xfs_trace.h" - -/* - * The global quota manager. There is only one of these for the entire - * system, _not_ one per file system. XQM keeps track of the overall - * quota functionality, including maintaining the freelist and hash - * tables of dquots. 
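- * The manager is reference counted: every mount running quotas takes a
- * reference, and the whole structure is torn down when the last mount
- * drops its reference.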
- */ -struct mutex xfs_Gqm_lock; -struct xfs_qm *xfs_Gqm; -uint ndquot; - -kmem_zone_t *qm_dqzone; -kmem_zone_t *qm_dqtrxzone; - -STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); -STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); - -STATIC int xfs_qm_init_quotainos(xfs_mount_t *); -STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); -STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); - -static struct shrinker xfs_qm_shaker = { - .shrink = xfs_qm_shake, - .seeks = DEFAULT_SEEKS, -}; - -/* - * Initialize the XQM structure. - * Note that there is not one quota manager per file system. - */ -STATIC struct xfs_qm * -xfs_Gqm_init(void) -{ - xfs_dqhash_t *udqhash, *gdqhash; - xfs_qm_t *xqm; - size_t hsize; - uint i; - - /* - * Initialize the dquot hash tables. - */ - udqhash = kmem_zalloc_greedy(&hsize, - XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), - XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t)); - if (!udqhash) - goto out; - - gdqhash = kmem_zalloc_large(hsize); - if (!gdqhash) - goto out_free_udqhash; - - hsize /= sizeof(xfs_dqhash_t); - ndquot = hsize << 8; - - xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); - xqm->qm_dqhashmask = hsize - 1; - xqm->qm_usr_dqhtable = udqhash; - xqm->qm_grp_dqhtable = gdqhash; - ASSERT(xqm->qm_usr_dqhtable != NULL); - ASSERT(xqm->qm_grp_dqhtable != NULL); - - for (i = 0; i < hsize; i++) { - xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); - xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); - } - - /* - * Freelist of all dquots of all file systems - */ - INIT_LIST_HEAD(&xqm->qm_dqfrlist); - xqm->qm_dqfrlist_cnt = 0; - mutex_init(&xqm->qm_dqfrlist_lock); - - /* - * dquot zone. we register our own low-memory callback. - */ - if (!qm_dqzone) { - xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), - "xfs_dquots"); - qm_dqzone = xqm->qm_dqzone; - } else - xqm->qm_dqzone = qm_dqzone; - - register_shrinker(&xfs_qm_shaker); - - /* - * The t_dqinfo portion of transactions. - */ - if (!qm_dqtrxzone) { - xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), - "xfs_dqtrx"); - qm_dqtrxzone = xqm->qm_dqtrxzone; - } else - xqm->qm_dqtrxzone = qm_dqtrxzone; - - atomic_set(&xqm->qm_totaldquots, 0); - xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; - xqm->qm_nrefs = 0; - return xqm; - - out_free_udqhash: - kmem_free_large(udqhash); - out: - return NULL; -} - -/* - * Destroy the global quota manager when its reference count goes to zero. - */ -STATIC void -xfs_qm_destroy( - struct xfs_qm *xqm) -{ - struct xfs_dquot *dqp, *n; - int hsize, i; - - ASSERT(xqm != NULL); - ASSERT(xqm->qm_nrefs == 0); - unregister_shrinker(&xfs_qm_shaker); - hsize = xqm->qm_dqhashmask + 1; - for (i = 0; i < hsize; i++) { - xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); - xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); - } - kmem_free_large(xqm->qm_usr_dqhtable); - kmem_free_large(xqm->qm_grp_dqhtable); - xqm->qm_usr_dqhtable = NULL; - xqm->qm_grp_dqhtable = NULL; - xqm->qm_dqhashmask = 0; - - /* frlist cleanup */ - mutex_lock(&xqm->qm_dqfrlist_lock); - list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) { - xfs_dqlock(dqp); - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; - xfs_dqunlock(dqp); - xfs_qm_dqdestroy(dqp); - } - mutex_unlock(&xqm->qm_dqfrlist_lock); - mutex_destroy(&xqm->qm_dqfrlist_lock); - kmem_free(xqm); -} - -/* - * Called at mount time to let XQM know that another file system is - * starting quotas. 
This isn't crucial information as the individual mount
- * structures are pretty independent, but it helps the XQM keep a
- * global view of what's going on.
- */
-/* ARGSUSED */
-STATIC int
-xfs_qm_hold_quotafs_ref(
- struct xfs_mount *mp)
-{
- /*
- * Need to lock the xfs_Gqm structure for things like this. For example,
- * the structure could disappear between the entry to this routine and
- * a HOLD operation if not locked.
- */
- mutex_lock(&xfs_Gqm_lock);
-
- if (!xfs_Gqm) {
- xfs_Gqm = xfs_Gqm_init();
- if (!xfs_Gqm) {
- mutex_unlock(&xfs_Gqm_lock);
- return ENOMEM;
- }
- }
-
- /*
- * We can keep a list of all filesystems with quotas mounted for
- * debugging and statistical purposes, but ...
- * Just take a reference and get out.
- */
- xfs_Gqm->qm_nrefs++;
- mutex_unlock(&xfs_Gqm_lock);
-
- return 0;
-}
-
-
-/*
- * Release the reference that a filesystem took at mount time,
- * so that we know when we need to destroy the entire quota manager.
- */
-/* ARGSUSED */
-STATIC void
-xfs_qm_rele_quotafs_ref(
- struct xfs_mount *mp)
-{
- xfs_dquot_t *dqp, *n;
-
- ASSERT(xfs_Gqm);
- ASSERT(xfs_Gqm->qm_nrefs > 0);
-
- /*
- * Go thru the freelist and destroy all inactive dquots.
- */
- mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
-
- list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
- xfs_dqlock(dqp);
- if (dqp->dq_flags & XFS_DQ_INACTIVE) {
- ASSERT(dqp->q_mount == NULL);
- ASSERT(! XFS_DQ_IS_DIRTY(dqp));
- ASSERT(list_empty(&dqp->q_hashlist));
- ASSERT(list_empty(&dqp->q_mplist));
- list_del_init(&dqp->q_freelist);
- xfs_Gqm->qm_dqfrlist_cnt--;
- xfs_dqunlock(dqp);
- xfs_qm_dqdestroy(dqp);
- } else {
- xfs_dqunlock(dqp);
- }
- }
- mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-
- /*
- * Destroy the entire XQM. If somebody mounts with quotaon, this'll
- * be restarted.
- */
- mutex_lock(&xfs_Gqm_lock);
- if (--xfs_Gqm->qm_nrefs == 0) {
- xfs_qm_destroy(xfs_Gqm);
- xfs_Gqm = NULL;
- }
- mutex_unlock(&xfs_Gqm_lock);
-}
-
-/*
- * Just destroy the quotainfo structure.
- */
-void
-xfs_qm_unmount(
- struct xfs_mount *mp)
-{
- if (mp->m_quotainfo) {
- xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
- xfs_qm_destroy_quotainfo(mp);
- }
-}
-
-
-/*
- * This is called from xfs_mountfs to start quotas and initialize all
- * necessary data structures like quotainfo. This is also responsible for
- * running a quotacheck as necessary. We are guaranteed that the superblock
- * is consistently read in at this point.
- *
- * If we fail here, the mount will continue with quota turned off. We don't
- * need to indicate success or failure at all.
- */
-void
-xfs_qm_mount_quotas(
- xfs_mount_t *mp)
-{
- int error = 0;
- uint sbf;
-
- /*
- * If quotas on realtime volumes are not supported, we disable
- * quotas immediately.
- */
- if (mp->m_sb.sb_rextents) {
- xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
- mp->m_qflags = 0;
- goto write_changes;
- }
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- /*
- * Allocate the quotainfo structure inside the mount struct, and
- * create quotainode(s), and change/rev superblock if necessary.
- */
- error = xfs_qm_init_quotainfo(mp);
- if (error) {
- /*
- * We must turn off quotas.
- */
- ASSERT(mp->m_quotainfo == NULL);
- mp->m_qflags = 0;
- goto write_changes;
- }
- /*
- * If any of the quotas are not consistent, do a quotacheck.
- */
- if (XFS_QM_NEED_QUOTACHECK(mp)) {
- error = xfs_qm_quotacheck(mp);
- if (error) {
- /* Quotacheck failed and disabled quotas.
*/ - return; - } - } - /* - * If one type of quotas is off, then it will lose its - * quotachecked status, since we won't be doing accounting for - * that type anymore. - */ - if (!XFS_IS_UQUOTA_ON(mp)) - mp->m_qflags &= ~XFS_UQUOTA_CHKD; - if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) - mp->m_qflags &= ~XFS_OQUOTA_CHKD; - - write_changes: - /* - * We actually don't have to acquire the m_sb_lock at all. - * This can only be called from mount, and that's single threaded. XXX - */ - spin_lock(&mp->m_sb_lock); - sbf = mp->m_sb.sb_qflags; - mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; - spin_unlock(&mp->m_sb_lock); - - if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { - if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { - /* - * We could only have been turning quotas off. - * We aren't in very good shape actually because - * the incore structures are convinced that quotas are - * off, but the on disk superblock doesn't know that ! - */ - ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); - xfs_alert(mp, "%s: Superblock update failed!", - __func__); - } - } - - if (error) { - xfs_warn(mp, "Failed to initialize disk quotas."); - return; - } -} - -/* - * Called from the vfsops layer. - */ -void -xfs_qm_unmount_quotas( - xfs_mount_t *mp) -{ - /* - * Release the dquots that root inode, et al might be holding, - * before we flush quotas and blow away the quotainfo structure. - */ - ASSERT(mp->m_rootip); - xfs_qm_dqdetach(mp->m_rootip); - if (mp->m_rbmip) - xfs_qm_dqdetach(mp->m_rbmip); - if (mp->m_rsumip) - xfs_qm_dqdetach(mp->m_rsumip); - - /* - * Release the quota inodes. - */ - if (mp->m_quotainfo) { - if (mp->m_quotainfo->qi_uquotaip) { - IRELE(mp->m_quotainfo->qi_uquotaip); - mp->m_quotainfo->qi_uquotaip = NULL; - } - if (mp->m_quotainfo->qi_gquotaip) { - IRELE(mp->m_quotainfo->qi_gquotaip); - mp->m_quotainfo->qi_gquotaip = NULL; - } - } -} - -/* - * Flush all dquots of the given file system to disk. The dquots are - * _not_ purged from memory here, just their data written to disk. - */ -STATIC int -xfs_qm_dqflush_all( - struct xfs_mount *mp, - int sync_mode) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - int recl; - struct xfs_dquot *dqp; - int error; - - if (!q) - return 0; -again: - mutex_lock(&q->qi_dqlist_lock); - list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { - xfs_dqlock(dqp); - if (! XFS_DQ_IS_DIRTY(dqp)) { - xfs_dqunlock(dqp); - continue; - } - - /* XXX a sentinel would be better */ - recl = q->qi_dqreclaims; - if (!xfs_dqflock_nowait(dqp)) { - /* - * If we can't grab the flush lock then check - * to see if the dquot has been flushed delayed - * write. If so, grab its buffer and send it - * out immediately. We'll be able to acquire - * the flush lock when the I/O completes. - */ - xfs_qm_dqflock_pushbuf_wait(dqp); - } - /* - * Let go of the mplist lock. We don't want to hold it - * across a disk write. - */ - mutex_unlock(&q->qi_dqlist_lock); - error = xfs_qm_dqflush(dqp, sync_mode); - xfs_dqunlock(dqp); - if (error) - return error; - - mutex_lock(&q->qi_dqlist_lock); - if (recl != q->qi_dqreclaims) { - mutex_unlock(&q->qi_dqlist_lock); - /* XXX restart limit */ - goto again; - } - } - - mutex_unlock(&q->qi_dqlist_lock); - /* return ! busy */ - return 0; -} -/* - * Release the group dquot pointers the user dquots may be - * carrying around as a hint. mplist is locked on entry and exit. 
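- * (Each hint holds a real reference, and dropping it means calling
- * dqput, which cannot be done while holding the mplist lock; hence the
- * lock is cycled and the scan restarted if reclaims happened meanwhile.)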
- */ -STATIC void -xfs_qm_detach_gdquots( - struct xfs_mount *mp) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_dquot *dqp, *gdqp; - int nrecl; - - again: - ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); - list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { - xfs_dqlock(dqp); - if ((gdqp = dqp->q_gdquot)) { - xfs_dqlock(gdqp); - dqp->q_gdquot = NULL; - } - xfs_dqunlock(dqp); - - if (gdqp) { - /* - * Can't hold the mplist lock across a dqput. - * XXXmust convert to marker based iterations here. - */ - nrecl = q->qi_dqreclaims; - mutex_unlock(&q->qi_dqlist_lock); - xfs_qm_dqput(gdqp); - - mutex_lock(&q->qi_dqlist_lock); - if (nrecl != q->qi_dqreclaims) - goto again; - } - } -} - -/* - * Go through all the incore dquots of this file system and take them - * off the mplist and hashlist, if the dquot type matches the dqtype - * parameter. This is used when turning off quota accounting for - * users and/or groups, as well as when the filesystem is unmounting. - */ -STATIC int -xfs_qm_dqpurge_int( - struct xfs_mount *mp, - uint flags) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_dquot *dqp, *n; - uint dqtype; - int nrecl; - int nmisses; - - if (!q) - return 0; - - dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; - dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; - dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; - - mutex_lock(&q->qi_dqlist_lock); - - /* - * In the first pass through all incore dquots of this filesystem, - * we release the group dquot pointers the user dquots may be - * carrying around as a hint. We need to do this irrespective of - * what's being turned off. - */ - xfs_qm_detach_gdquots(mp); - - again: - nmisses = 0; - ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); - /* - * Try to get rid of all of the unwanted dquots. The idea is to - * get them off mplist and hashlist, but leave them on freelist. - */ - list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) { - /* - * It's OK to look at the type without taking dqlock here. - * We're holding the mplist lock here, and that's needed for - * a dqreclaim. - */ - if ((dqp->dq_flags & dqtype) == 0) - continue; - - if (!mutex_trylock(&dqp->q_hash->qh_lock)) { - nrecl = q->qi_dqreclaims; - mutex_unlock(&q->qi_dqlist_lock); - mutex_lock(&dqp->q_hash->qh_lock); - mutex_lock(&q->qi_dqlist_lock); - - /* - * XXXTheoretically, we can get into a very long - * ping pong game here. - * No one can be adding dquots to the mplist at - * this point, but somebody might be taking things off. - */ - if (nrecl != q->qi_dqreclaims) { - mutex_unlock(&dqp->q_hash->qh_lock); - goto again; - } - } - - /* - * Take the dquot off the mplist and hashlist. It may remain on - * freelist in INACTIVE state. - */ - nmisses += xfs_qm_dqpurge(dqp); - } - mutex_unlock(&q->qi_dqlist_lock); - return nmisses; -} - -int -xfs_qm_dqpurge_all( - xfs_mount_t *mp, - uint flags) -{ - int ndquots; - - /* - * Purge the dquot cache. - * None of the dquots should really be busy at this point. - */ - if (mp->m_quotainfo) { - while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { - delay(ndquots * 10); - } - } - return 0; -} - -STATIC int -xfs_qm_dqattach_one( - xfs_inode_t *ip, - xfs_dqid_t id, - uint type, - uint doalloc, - xfs_dquot_t *udqhint, /* hint */ - xfs_dquot_t **IO_idqpp) -{ - xfs_dquot_t *dqp; - int error; - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - error = 0; - - /* - * See if we already have it in the inode itself. IO_idqpp is - * &i_udquot or &i_gdquot. 
This made the code look weird, but - * made the logic a lot simpler. - */ - dqp = *IO_idqpp; - if (dqp) { - trace_xfs_dqattach_found(dqp); - return 0; - } - - /* - * udqhint is the i_udquot field in inode, and is non-NULL only - * when the type arg is group/project. Its purpose is to save a - * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside - * the user dquot. - */ - if (udqhint) { - ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ); - xfs_dqlock(udqhint); - - /* - * No need to take dqlock to look at the id. - * - * The ID can't change until it gets reclaimed, and it won't - * be reclaimed as long as we have a ref from inode and we - * hold the ilock. - */ - dqp = udqhint->q_gdquot; - if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) { - xfs_dqlock(dqp); - XFS_DQHOLD(dqp); - ASSERT(*IO_idqpp == NULL); - *IO_idqpp = dqp; - - xfs_dqunlock(dqp); - xfs_dqunlock(udqhint); - return 0; - } - - /* - * We can't hold a dquot lock when we call the dqget code. - * We'll deadlock in no time, because of (not conforming to) - * lock ordering - the inodelock comes before any dquot lock, - * and we may drop and reacquire the ilock in xfs_qm_dqget(). - */ - xfs_dqunlock(udqhint); - } - - /* - * Find the dquot from somewhere. This bumps the - * reference count of dquot and returns it locked. - * This can return ENOENT if dquot didn't exist on - * disk and we didn't ask it to allocate; - * ESRCH if quotas got turned off suddenly. - */ - error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp); - if (error) - return error; - - trace_xfs_dqattach_get(dqp); - - /* - * dqget may have dropped and re-acquired the ilock, but it guarantees - * that the dquot returned is the one that should go in the inode. - */ - *IO_idqpp = dqp; - xfs_dqunlock(dqp); - return 0; -} - - -/* - * Given a udquot and gdquot, attach a ptr to the group dquot in the - * udquot as a hint for future lookups. The idea sounds simple, but the - * execution isn't, because the udquot might have a group dquot attached - * already and getting rid of that gets us into lock ordering constraints. - * The process is complicated more by the fact that the dquots may or may not - * be locked on entry. - */ -STATIC void -xfs_qm_dqattach_grouphint( - xfs_dquot_t *udq, - xfs_dquot_t *gdq) -{ - xfs_dquot_t *tmp; - - xfs_dqlock(udq); - - if ((tmp = udq->q_gdquot)) { - if (tmp == gdq) { - xfs_dqunlock(udq); - return; - } - - udq->q_gdquot = NULL; - /* - * We can't keep any dqlocks when calling dqrele, - * because the freelist lock comes before dqlocks. - */ - xfs_dqunlock(udq); - /* - * we took a hard reference once upon a time in dqget, - * so give it back when the udquot no longer points at it - * dqput() does the unlocking of the dquot. - */ - xfs_qm_dqrele(tmp); - - xfs_dqlock(udq); - xfs_dqlock(gdq); - - } else { - ASSERT(XFS_DQ_IS_LOCKED(udq)); - xfs_dqlock(gdq); - } - - ASSERT(XFS_DQ_IS_LOCKED(udq)); - ASSERT(XFS_DQ_IS_LOCKED(gdq)); - /* - * Somebody could have attached a gdquot here, - * when we dropped the uqlock. If so, just do nothing. - */ - if (udq->q_gdquot == NULL) { - XFS_DQHOLD(gdq); - udq->q_gdquot = gdq; - } - - xfs_dqunlock(gdq); - xfs_dqunlock(udq); -} - - -/* - * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON - * into account. - * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. - * Inode may get unlocked and relocked in here, and the caller must deal with - * the consequences. 
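- * (The unlock/relock happens inside xfs_qm_dqget when a dquot has to
- * be read from, or allocated on, disk.)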
- */ -int -xfs_qm_dqattach_locked( - xfs_inode_t *ip, - uint flags) -{ - xfs_mount_t *mp = ip->i_mount; - uint nquotas = 0; - int error = 0; - - if (!XFS_IS_QUOTA_RUNNING(mp) || - !XFS_IS_QUOTA_ON(mp) || - !XFS_NOT_DQATTACHED(mp, ip) || - ip->i_ino == mp->m_sb.sb_uquotino || - ip->i_ino == mp->m_sb.sb_gquotino) - return 0; - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - - if (XFS_IS_UQUOTA_ON(mp)) { - error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, - flags & XFS_QMOPT_DQALLOC, - NULL, &ip->i_udquot); - if (error) - goto done; - nquotas++; - } - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - if (XFS_IS_OQUOTA_ON(mp)) { - error = XFS_IS_GQUOTA_ON(mp) ? - xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, - flags & XFS_QMOPT_DQALLOC, - ip->i_udquot, &ip->i_gdquot) : - xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, - flags & XFS_QMOPT_DQALLOC, - ip->i_udquot, &ip->i_gdquot); - /* - * Don't worry about the udquot that we may have - * attached above. It'll get detached, if not already. - */ - if (error) - goto done; - nquotas++; - } - - /* - * Attach this group quota to the user quota as a hint. - * This WON'T, in general, result in a thrash. - */ - if (nquotas == 2) { - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(ip->i_udquot); - ASSERT(ip->i_gdquot); - - /* - * We may or may not have the i_udquot locked at this point, - * but this check is OK since we don't depend on the i_gdquot to - * be accurate 100% all the time. It is just a hint, and this - * will succeed in general. - */ - if (ip->i_udquot->q_gdquot == ip->i_gdquot) - goto done; - /* - * Attach i_gdquot to the gdquot hint inside the i_udquot. - */ - xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot); - } - - done: -#ifdef DEBUG - if (!error) { - if (XFS_IS_UQUOTA_ON(mp)) - ASSERT(ip->i_udquot); - if (XFS_IS_OQUOTA_ON(mp)) - ASSERT(ip->i_gdquot); - } - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); -#endif - return error; -} - -int -xfs_qm_dqattach( - struct xfs_inode *ip, - uint flags) -{ - int error; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - error = xfs_qm_dqattach_locked(ip, flags); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - - return error; -} - -/* - * Release dquots (and their references) if any. - * The inode should be locked EXCL except when this's called by - * xfs_ireclaim. - */ -void -xfs_qm_dqdetach( - xfs_inode_t *ip) -{ - if (!(ip->i_udquot || ip->i_gdquot)) - return; - - trace_xfs_dquot_dqdetach(ip); - - ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino); - ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino); - if (ip->i_udquot) { - xfs_qm_dqrele(ip->i_udquot); - ip->i_udquot = NULL; - } - if (ip->i_gdquot) { - xfs_qm_dqrele(ip->i_gdquot); - ip->i_gdquot = NULL; - } -} - -int -xfs_qm_sync( - struct xfs_mount *mp, - int flags) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - int recl, restarts; - struct xfs_dquot *dqp; - int error; - - if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) - return 0; - - restarts = 0; - - again: - mutex_lock(&q->qi_dqlist_lock); - /* - * dqpurge_all() also takes the mplist lock and iterate thru all dquots - * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared - * when we have the mplist lock, we know that dquots will be consistent - * as long as we have it locked. - */ - if (!XFS_IS_QUOTA_ON(mp)) { - mutex_unlock(&q->qi_dqlist_lock); - return 0; - } - ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); - list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { - /* - * If this is vfs_sync calling, then skip the dquots that - * don't 'seem' to be dirty. ie. 
don't acquire dqlock. - * This is very similar to what xfs_sync does with inodes. - */ - if (flags & SYNC_TRYLOCK) { - if (!XFS_DQ_IS_DIRTY(dqp)) - continue; - if (!xfs_qm_dqlock_nowait(dqp)) - continue; - } else { - xfs_dqlock(dqp); - } - - /* - * Now, find out for sure if this dquot is dirty or not. - */ - if (! XFS_DQ_IS_DIRTY(dqp)) { - xfs_dqunlock(dqp); - continue; - } - - /* XXX a sentinel would be better */ - recl = q->qi_dqreclaims; - if (!xfs_dqflock_nowait(dqp)) { - if (flags & SYNC_TRYLOCK) { - xfs_dqunlock(dqp); - continue; - } - /* - * If we can't grab the flush lock then if the caller - * really wanted us to give this our best shot, so - * see if we can give a push to the buffer before we wait - * on the flush lock. At this point, we know that - * even though the dquot is being flushed, - * it has (new) dirty data. - */ - xfs_qm_dqflock_pushbuf_wait(dqp); - } - /* - * Let go of the mplist lock. We don't want to hold it - * across a disk write - */ - mutex_unlock(&q->qi_dqlist_lock); - error = xfs_qm_dqflush(dqp, flags); - xfs_dqunlock(dqp); - if (error && XFS_FORCED_SHUTDOWN(mp)) - return 0; /* Need to prevent umount failure */ - else if (error) - return error; - - mutex_lock(&q->qi_dqlist_lock); - if (recl != q->qi_dqreclaims) { - if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS) - break; - - mutex_unlock(&q->qi_dqlist_lock); - goto again; - } - } - - mutex_unlock(&q->qi_dqlist_lock); - return 0; -} - -/* - * The hash chains and the mplist use the same xfs_dqhash structure as - * their list head, but we can take the mplist qh_lock and one of the - * hash qh_locks at the same time without any problem as they aren't - * related. - */ -static struct lock_class_key xfs_quota_mplist_class; - -/* - * This initializes all the quota information that's kept in the - * mount structure - */ -STATIC int -xfs_qm_init_quotainfo( - xfs_mount_t *mp) -{ - xfs_quotainfo_t *qinf; - int error; - xfs_dquot_t *dqp; - - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - - /* - * Tell XQM that we exist as soon as possible. - */ - if ((error = xfs_qm_hold_quotafs_ref(mp))) { - return error; - } - - qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); - - /* - * See if quotainodes are setup, and if not, allocate them, - * and change the superblock accordingly. - */ - if ((error = xfs_qm_init_quotainos(mp))) { - kmem_free(qinf); - mp->m_quotainfo = NULL; - return error; - } - - INIT_LIST_HEAD(&qinf->qi_dqlist); - mutex_init(&qinf->qi_dqlist_lock); - lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); - - qinf->qi_dqreclaims = 0; - - /* mutex used to serialize quotaoffs */ - mutex_init(&qinf->qi_quotaofflock); - - /* Precalc some constants */ - qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); - ASSERT(qinf->qi_dqchunklen); - qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen); - do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t)); - - mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); - - /* - * We try to get the limits from the superuser's limits fields. - * This is quite hacky, but it is standard quota practice. - * We look at the USR dquot with id == 0 first, but if user quotas - * are not enabled we goto the GRP dquot with id == 0. - * We don't really care to keep separate default limits for user - * and group quotas, at least not at this point. - */ - error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0, - XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : - (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP : - XFS_DQ_PROJ), - XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN, - &dqp); - if (! 
error) { - xfs_disk_dquot_t *ddqp = &dqp->q_core; - - /* - * The warnings and timers set the grace period given to - * a user or group before he or she can not perform any - * more writing. If it is zero, a default is used. - */ - qinf->qi_btimelimit = ddqp->d_btimer ? - be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT; - qinf->qi_itimelimit = ddqp->d_itimer ? - be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT; - qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ? - be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT; - qinf->qi_bwarnlimit = ddqp->d_bwarns ? - be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT; - qinf->qi_iwarnlimit = ddqp->d_iwarns ? - be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT; - qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ? - be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT; - qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); - qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); - qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); - qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); - qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); - qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); - - /* - * We sent the XFS_QMOPT_DQSUSER flag to dqget because - * we don't want this dquot cached. We haven't done a - * quotacheck yet, and quotacheck doesn't like incore dquots. - */ - xfs_qm_dqdestroy(dqp); - } else { - qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; - qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; - qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; - qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; - qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; - qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; - } - - return 0; -} - - -/* - * Gets called when unmounting a filesystem or when all quotas get - * turned off. - * This purges the quota inodes, destroys locks and frees itself. - */ -void -xfs_qm_destroy_quotainfo( - xfs_mount_t *mp) -{ - xfs_quotainfo_t *qi; - - qi = mp->m_quotainfo; - ASSERT(qi != NULL); - ASSERT(xfs_Gqm != NULL); - - /* - * Release the reference that XQM kept, so that we know - * when the XQM structure should be freed. We cannot assume - * that xfs_Gqm is non-null after this point. 
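- * (If ours was the last reference, xfs_qm_rele_quotafs_ref tears down
- * the global manager and resets xfs_Gqm to NULL.)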
- */ - xfs_qm_rele_quotafs_ref(mp); - - ASSERT(list_empty(&qi->qi_dqlist)); - mutex_destroy(&qi->qi_dqlist_lock); - - if (qi->qi_uquotaip) { - IRELE(qi->qi_uquotaip); - qi->qi_uquotaip = NULL; /* paranoia */ - } - if (qi->qi_gquotaip) { - IRELE(qi->qi_gquotaip); - qi->qi_gquotaip = NULL; - } - mutex_destroy(&qi->qi_quotaofflock); - kmem_free(qi); - mp->m_quotainfo = NULL; -} - - - -/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ - -/* ARGSUSED */ -STATIC void -xfs_qm_list_init( - xfs_dqlist_t *list, - char *str, - int n) -{ - mutex_init(&list->qh_lock); - INIT_LIST_HEAD(&list->qh_list); - list->qh_version = 0; - list->qh_nelems = 0; -} - -STATIC void -xfs_qm_list_destroy( - xfs_dqlist_t *list) -{ - mutex_destroy(&(list->qh_lock)); -} - -/* - * Create an inode and return with a reference already taken, but unlocked - * This is how we create quota inodes - */ -STATIC int -xfs_qm_qino_alloc( - xfs_mount_t *mp, - xfs_inode_t **ip, - __int64_t sbfields, - uint flags) -{ - xfs_trans_t *tp; - int error; - int committed; - - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); - if ((error = xfs_trans_reserve(tp, - XFS_QM_QINOCREATE_SPACE_RES(mp), - XFS_CREATE_LOG_RES(mp), 0, - XFS_TRANS_PERM_LOG_RES, - XFS_CREATE_LOG_COUNT))) { - xfs_trans_cancel(tp, 0); - return error; - } - - error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); - if (error) { - xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | - XFS_TRANS_ABORT); - return error; - } - - /* - * Make the changes in the superblock, and log those too. - * sbfields arg may contain fields other than *QUOTINO; - * VERSIONNUM for example. - */ - spin_lock(&mp->m_sb_lock); - if (flags & XFS_QMOPT_SBVERSION) { - ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); - ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | - XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == - (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | - XFS_SB_GQUOTINO | XFS_SB_QFLAGS)); - - xfs_sb_version_addquota(&mp->m_sb); - mp->m_sb.sb_uquotino = NULLFSINO; - mp->m_sb.sb_gquotino = NULLFSINO; - - /* qflags will get updated _after_ quotacheck */ - mp->m_sb.sb_qflags = 0; - } - if (flags & XFS_QMOPT_UQUOTA) - mp->m_sb.sb_uquotino = (*ip)->i_ino; - else - mp->m_sb.sb_gquotino = (*ip)->i_ino; - spin_unlock(&mp->m_sb_lock); - xfs_mod_sb(tp, sbfields); - - if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { - xfs_alert(mp, "%s failed (error %d)!", __func__, error); - return error; - } - return 0; -} - - -STATIC void -xfs_qm_reset_dqcounts( - xfs_mount_t *mp, - xfs_buf_t *bp, - xfs_dqid_t id, - uint type) -{ - xfs_disk_dquot_t *ddq; - int j; - - trace_xfs_reset_dqcounts(bp, _RET_IP_); - - /* - * Reset all counters and timers. They'll be - * started afresh by xfs_qm_quotacheck. - */ -#ifdef DEBUG - j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); - do_div(j, sizeof(xfs_dqblk_t)); - ASSERT(mp->m_quotainfo->qi_dqperchunk == j); -#endif - ddq = bp->b_addr; - for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { - /* - * Do a sanity check, and if needed, repair the dqblk. Don't - * output any warnings because it's perfectly possible to - * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. 
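- * (XFS_QMOPT_DQREPAIR asks xfs_qm_dqcheck to rewrite a bad dquot block
- * in place rather than merely report it.)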
- */ - (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, - "xfs_quotacheck"); - ddq->d_bcount = 0; - ddq->d_icount = 0; - ddq->d_rtbcount = 0; - ddq->d_btimer = 0; - ddq->d_itimer = 0; - ddq->d_rtbtimer = 0; - ddq->d_bwarns = 0; - ddq->d_iwarns = 0; - ddq->d_rtbwarns = 0; - ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); - } -} - -STATIC int -xfs_qm_dqiter_bufs( - xfs_mount_t *mp, - xfs_dqid_t firstid, - xfs_fsblock_t bno, - xfs_filblks_t blkcnt, - uint flags) -{ - xfs_buf_t *bp; - int error; - int type; - - ASSERT(blkcnt > 0); - type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : - (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); - error = 0; - - /* - * Blkcnt arg can be a very big number, and might even be - * larger than the log itself. So, we have to break it up into - * manageable-sized transactions. - * Note that we don't start a permanent transaction here; we might - * not be able to get a log reservation for the whole thing up front, - * and we don't really care to either, because we just discard - * everything if we were to crash in the middle of this loop. - */ - while (blkcnt--) { - error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, - XFS_FSB_TO_DADDR(mp, bno), - mp->m_quotainfo->qi_dqchunklen, 0, &bp); - if (error) - break; - - xfs_qm_reset_dqcounts(mp, bp, firstid, type); - xfs_bdwrite(mp, bp); - /* - * goto the next block. - */ - bno++; - firstid += mp->m_quotainfo->qi_dqperchunk; - } - return error; -} - -/* - * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a - * caller supplied function for every chunk of dquots that we find. - */ -STATIC int -xfs_qm_dqiterate( - xfs_mount_t *mp, - xfs_inode_t *qip, - uint flags) -{ - xfs_bmbt_irec_t *map; - int i, nmaps; /* number of map entries */ - int error; /* return value */ - xfs_fileoff_t lblkno; - xfs_filblks_t maxlblkcnt; - xfs_dqid_t firstid; - xfs_fsblock_t rablkno; - xfs_filblks_t rablkcnt; - - error = 0; - /* - * This looks racy, but we can't keep an inode lock across a - * trans_reserve. But, this gets called during quotacheck, and that - * happens only at mount time which is single threaded. - */ - if (qip->i_d.di_nblocks == 0) - return 0; - - map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP); - - lblkno = 0; - maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); - do { - nmaps = XFS_DQITER_MAP_SIZE; - /* - * We aren't changing the inode itself. Just changing - * some of its data. No new blocks are added here, and - * the inode is never added to the transaction. - */ - xfs_ilock(qip, XFS_ILOCK_SHARED); - error = xfs_bmapi(NULL, qip, lblkno, - maxlblkcnt - lblkno, - XFS_BMAPI_METADATA, - NULL, - 0, map, &nmaps, NULL); - xfs_iunlock(qip, XFS_ILOCK_SHARED); - if (error) - break; - - ASSERT(nmaps <= XFS_DQITER_MAP_SIZE); - for (i = 0; i < nmaps; i++) { - ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); - ASSERT(map[i].br_blockcount); - - - lblkno += map[i].br_blockcount; - - if (map[i].br_startblock == HOLESTARTBLOCK) - continue; - - firstid = (xfs_dqid_t) map[i].br_startoff * - mp->m_quotainfo->qi_dqperchunk; - /* - * Do a read-ahead on the next extent. - */ - if ((i+1 < nmaps) && - (map[i+1].br_startblock != HOLESTARTBLOCK)) { - rablkcnt = map[i+1].br_blockcount; - rablkno = map[i+1].br_startblock; - while (rablkcnt--) { - xfs_buf_readahead(mp->m_ddev_targp, - XFS_FSB_TO_DADDR(mp, rablkno), - mp->m_quotainfo->qi_dqchunklen); - rablkno++; - } - } - /* - * Iterate thru all the blks in the extent and - * reset the counters of all the dquots inside them. 
- */ - if ((error = xfs_qm_dqiter_bufs(mp, - firstid, - map[i].br_startblock, - map[i].br_blockcount, - flags))) { - break; - } - } - - if (error) - break; - } while (nmaps > 0); - - kmem_free(map); - - return error; -} - -/* - * Called by dqusage_adjust in doing a quotacheck. - * - * Given the inode, and a dquot id this updates both the incore dqout as well - * as the buffer copy. This is so that once the quotacheck is done, we can - * just log all the buffers, as opposed to logging numerous updates to - * individual dquots. - */ -STATIC int -xfs_qm_quotacheck_dqadjust( - struct xfs_inode *ip, - xfs_dqid_t id, - uint type, - xfs_qcnt_t nblks, - xfs_qcnt_t rtblks) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_dquot *dqp; - int error; - - error = xfs_qm_dqget(mp, ip, id, type, - XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp); - if (error) { - /* - * Shouldn't be able to turn off quotas here. - */ - ASSERT(error != ESRCH); - ASSERT(error != ENOENT); - return error; - } - - trace_xfs_dqadjust(dqp); - - /* - * Adjust the inode count and the block count to reflect this inode's - * resource usage. - */ - be64_add_cpu(&dqp->q_core.d_icount, 1); - dqp->q_res_icount++; - if (nblks) { - be64_add_cpu(&dqp->q_core.d_bcount, nblks); - dqp->q_res_bcount += nblks; - } - if (rtblks) { - be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); - dqp->q_res_rtbcount += rtblks; - } - - /* - * Set default limits, adjust timers (since we changed usages) - * - * There are no timers for the default values set in the root dquot. - */ - if (dqp->q_core.d_id) { - xfs_qm_adjust_dqlimits(mp, &dqp->q_core); - xfs_qm_adjust_dqtimers(mp, &dqp->q_core); - } - - dqp->dq_flags |= XFS_DQ_DIRTY; - xfs_qm_dqput(dqp); - return 0; -} - -STATIC int -xfs_qm_get_rtblks( - xfs_inode_t *ip, - xfs_qcnt_t *O_rtblks) -{ - xfs_filblks_t rtblks; /* total rt blks */ - xfs_extnum_t idx; /* extent record index */ - xfs_ifork_t *ifp; /* inode fork pointer */ - xfs_extnum_t nextents; /* number of extent entries */ - int error; - - ASSERT(XFS_IS_REALTIME_INODE(ip)); - ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); - if (!(ifp->if_flags & XFS_IFEXTENTS)) { - if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) - return error; - } - rtblks = 0; - nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); - for (idx = 0; idx < nextents; idx++) - rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx)); - *O_rtblks = (xfs_qcnt_t)rtblks; - return 0; -} - -/* - * callback routine supplied to bulkstat(). Given an inumber, find its - * dquots and update them to account for resources taken by that inode. - */ -/* ARGSUSED */ -STATIC int -xfs_qm_dqusage_adjust( - xfs_mount_t *mp, /* mount point for filesystem */ - xfs_ino_t ino, /* inode number to get data for */ - void __user *buffer, /* not used */ - int ubsize, /* not used */ - int *ubused, /* not used */ - int *res) /* result code value */ -{ - xfs_inode_t *ip; - xfs_qcnt_t nblks, rtblks = 0; - int error; - - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - - /* - * rootino must have its resources accounted for, not so with the quota - * inodes. - */ - if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { - *res = BULKSTAT_RV_NOTHING; - return XFS_ERROR(EINVAL); - } - - /* - * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget - * interface expects the inode to be exclusively locked because that's - * the case in all other instances. It's OK that we do this because - * quotacheck is done only at mount time. 
- */ - error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); - if (error) { - *res = BULKSTAT_RV_NOTHING; - return error; - } - - ASSERT(ip->i_delayed_blks == 0); - - if (XFS_IS_REALTIME_INODE(ip)) { - /* - * Walk thru the extent list and count the realtime blocks. - */ - error = xfs_qm_get_rtblks(ip, &rtblks); - if (error) - goto error0; - } - - nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; - - /* - * Add the (disk blocks and inode) resources occupied by this - * inode to its dquots. We do this adjustment in the incore dquot, - * and also copy the changes to its buffer. - * We don't care about putting these changes in a transaction - * envelope because if we crash in the middle of a 'quotacheck' - * we have to start from the beginning anyway. - * Once we're done, we'll log all the dquot bufs. - * - * The *QUOTA_ON checks below may look pretty racy, but quotachecks - * and quotaoffs don't race. (Quotachecks happen at mount time only). - */ - if (XFS_IS_UQUOTA_ON(mp)) { - error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, - XFS_DQ_USER, nblks, rtblks); - if (error) - goto error0; - } - - if (XFS_IS_GQUOTA_ON(mp)) { - error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, - XFS_DQ_GROUP, nblks, rtblks); - if (error) - goto error0; - } - - if (XFS_IS_PQUOTA_ON(mp)) { - error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), - XFS_DQ_PROJ, nblks, rtblks); - if (error) - goto error0; - } - - xfs_iunlock(ip, XFS_ILOCK_EXCL); - IRELE(ip); - *res = BULKSTAT_RV_DIDONE; - return 0; - -error0: - xfs_iunlock(ip, XFS_ILOCK_EXCL); - IRELE(ip); - *res = BULKSTAT_RV_GIVEUP; - return error; -} - -/* - * Walk thru all the filesystem inodes and construct a consistent view - * of the disk quota world. If the quotacheck fails, disable quotas. - */ -int -xfs_qm_quotacheck( - xfs_mount_t *mp) -{ - int done, count, error; - xfs_ino_t lastino; - size_t structsz; - xfs_inode_t *uip, *gip; - uint flags; - - count = INT_MAX; - structsz = 1; - lastino = 0; - flags = 0; - - ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - - /* - * There should be no cached dquots. The (simplistic) quotacheck - * algorithm doesn't like that. - */ - ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); - - xfs_notice(mp, "Quotacheck needed: Please wait."); - - /* - * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset - * their counters to zero. We need a clean slate. - * We don't log our changes till later. - */ - uip = mp->m_quotainfo->qi_uquotaip; - if (uip) { - error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA); - if (error) - goto error_return; - flags |= XFS_UQUOTA_CHKD; - } - - gip = mp->m_quotainfo->qi_gquotaip; - if (gip) { - error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? - XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); - if (error) - goto error_return; - flags |= XFS_OQUOTA_CHKD; - } - - do { - /* - * Iterate thru all the inodes in the file system, - * adjusting the corresponding dquot counters in core. - */ - error = xfs_bulkstat(mp, &lastino, &count, - xfs_qm_dqusage_adjust, - structsz, NULL, &done); - if (error) - break; - - } while (!done); - - /* - * We've made all the changes that we need to make incore. - * Flush them down to disk buffers if everything was updated - * successfully. - */ - if (!error) - error = xfs_qm_dqflush_all(mp, 0); - - /* - * We can get this error if we couldn't do a dquot allocation inside - * xfs_qm_dqusage_adjust (via bulkstat). 
We don't care about the - * dirty dquots that might be cached, we just want to get rid of them - * and turn quotaoff. The dquots won't be attached to any of the inodes - * at this point (because we intentionally didn't in dqget_noattach). - */ - if (error) { - xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); - goto error_return; - } - - /* - * We didn't log anything, because if we crashed, we'll have to - * start the quotacheck from scratch anyway. However, we must make - * sure that our dquot changes are secure before we put the - * quotacheck'd stamp on the superblock. So, here we do a synchronous - * flush. - */ - XFS_bflush(mp->m_ddev_targp); - - /* - * If one type of quotas is off, then it will lose its - * quotachecked status, since we won't be doing accounting for - * that type anymore. - */ - mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); - mp->m_qflags |= flags; - - error_return: - if (error) { - xfs_warn(mp, - "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", - error); - /* - * We must turn off quotas. - */ - ASSERT(mp->m_quotainfo != NULL); - ASSERT(xfs_Gqm != NULL); - xfs_qm_destroy_quotainfo(mp); - if (xfs_mount_reset_sbqflags(mp)) { - xfs_warn(mp, - "Quotacheck: Failed to reset quota flags."); - } - } else - xfs_notice(mp, "Quotacheck: Done."); - return (error); -} - -/* - * This is called after the superblock has been read in and we're ready to - * iget the quota inodes. - */ -STATIC int -xfs_qm_init_quotainos( - xfs_mount_t *mp) -{ - xfs_inode_t *uip, *gip; - int error; - __int64_t sbflags; - uint flags; - - ASSERT(mp->m_quotainfo); - uip = gip = NULL; - sbflags = 0; - flags = 0; - - /* - * Get the uquota and gquota inodes - */ - if (xfs_sb_version_hasquota(&mp->m_sb)) { - if (XFS_IS_UQUOTA_ON(mp) && - mp->m_sb.sb_uquotino != NULLFSINO) { - ASSERT(mp->m_sb.sb_uquotino > 0); - if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, - 0, 0, &uip))) - return XFS_ERROR(error); - } - if (XFS_IS_OQUOTA_ON(mp) && - mp->m_sb.sb_gquotino != NULLFSINO) { - ASSERT(mp->m_sb.sb_gquotino > 0); - if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, - 0, 0, &gip))) { - if (uip) - IRELE(uip); - return XFS_ERROR(error); - } - } - } else { - flags |= XFS_QMOPT_SBVERSION; - sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | - XFS_SB_GQUOTINO | XFS_SB_QFLAGS); - } - - /* - * Create the two inodes, if they don't exist already. The changes - * made above will get added to a transaction and logged in one of - * the qino_alloc calls below. If the device is readonly, - * temporarily switch to read-write to do this. - */ - if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { - if ((error = xfs_qm_qino_alloc(mp, &uip, - sbflags | XFS_SB_UQUOTINO, - flags | XFS_QMOPT_UQUOTA))) - return XFS_ERROR(error); - - flags &= ~XFS_QMOPT_SBVERSION; - } - if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) { - flags |= (XFS_IS_GQUOTA_ON(mp) ? - XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); - error = xfs_qm_qino_alloc(mp, &gip, - sbflags | XFS_SB_GQUOTINO, flags); - if (error) { - if (uip) - IRELE(uip); - - return XFS_ERROR(error); - } - } - - mp->m_quotainfo->qi_uquotaip = uip; - mp->m_quotainfo->qi_gquotaip = gip; - - return 0; -} - - - -/* - * Just pop the least recently used dquot off the freelist and - * recycle it. The returned dquot is locked. 
- */ -STATIC xfs_dquot_t * -xfs_qm_dqreclaim_one(void) -{ - xfs_dquot_t *dqpout; - xfs_dquot_t *dqp; - int restarts; - int startagain; - - restarts = 0; - dqpout = NULL; - - /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ -again: - startagain = 0; - mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); - - list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { - struct xfs_mount *mp = dqp->q_mount; - xfs_dqlock(dqp); - - /* - * We are racing with dqlookup here. Naturally we don't - * want to reclaim a dquot that lookup wants. We release the - * freelist lock and start over, so that lookup will grab - * both the dquot and the freelistlock. - */ - if (dqp->dq_flags & XFS_DQ_WANT) { - ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); - - trace_xfs_dqreclaim_want(dqp); - XQM_STATS_INC(xqmstats.xs_qm_dqwants); - restarts++; - startagain = 1; - goto dqunlock; - } - - /* - * If the dquot is inactive, we are assured that it is - * not on the mplist or the hashlist, and that makes our - * life easier. - */ - if (dqp->dq_flags & XFS_DQ_INACTIVE) { - ASSERT(mp == NULL); - ASSERT(! XFS_DQ_IS_DIRTY(dqp)); - ASSERT(list_empty(&dqp->q_hashlist)); - ASSERT(list_empty(&dqp->q_mplist)); - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; - dqpout = dqp; - XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); - goto dqunlock; - } - - ASSERT(dqp->q_hash); - ASSERT(!list_empty(&dqp->q_mplist)); - - /* - * Try to grab the flush lock. If this dquot is in the process - * of getting flushed to disk, we don't want to reclaim it. - */ - if (!xfs_dqflock_nowait(dqp)) - goto dqunlock; - - /* - * We have the flush lock so we know that this is not in the - * process of being flushed. So, if this is dirty, flush it - * DELWRI so that we don't get a freelist infested with - * dirty dquots. - */ - if (XFS_DQ_IS_DIRTY(dqp)) { - int error; - - trace_xfs_dqreclaim_dirty(dqp); - - /* - * We flush it delayed write, so don't bother - * releasing the freelist lock. - */ - error = xfs_qm_dqflush(dqp, 0); - if (error) { - xfs_warn(mp, "%s: dquot %p flush failed", - __func__, dqp); - } - goto dqunlock; - } - - /* - * We're trying to get the hashlock out of order. This races - * with dqlookup; so, we giveup and goto the next dquot if - * we couldn't get the hashlock. This way, we won't starve - * a dqlookup process that holds the hashlock that is - * waiting for the freelist lock. - */ - if (!mutex_trylock(&dqp->q_hash->qh_lock)) { - restarts++; - goto dqfunlock; - } - - /* - * This races with dquot allocation code as well as dqflush_all - * and reclaim code. So, if we failed to grab the mplist lock, - * giveup everything and start over. - */ - if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) { - restarts++; - startagain = 1; - goto qhunlock; - } - - ASSERT(dqp->q_nrefs == 0); - list_del_init(&dqp->q_mplist); - mp->m_quotainfo->qi_dquots--; - mp->m_quotainfo->qi_dqreclaims++; - list_del_init(&dqp->q_hashlist); - dqp->q_hash->qh_version++; - list_del_init(&dqp->q_freelist); - xfs_Gqm->qm_dqfrlist_cnt--; - dqpout = dqp; - mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); -qhunlock: - mutex_unlock(&dqp->q_hash->qh_lock); -dqfunlock: - xfs_dqfunlock(dqp); -dqunlock: - xfs_dqunlock(dqp); - if (dqpout) - break; - if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - break; - if (startagain) { - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - goto again; - } - } - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - return dqpout; -} - -/* - * Traverse the freelist of dquots and attempt to reclaim a maximum of - * 'howmany' dquots. 
This operation races with dqlookup(), and attempts to - * favor the lookup function ... - */ -STATIC int -xfs_qm_shake_freelist( - int howmany) -{ - int nreclaimed = 0; - xfs_dquot_t *dqp; - - if (howmany <= 0) - return 0; - - while (nreclaimed < howmany) { - dqp = xfs_qm_dqreclaim_one(); - if (!dqp) - return nreclaimed; - xfs_qm_dqdestroy(dqp); - nreclaimed++; - } - return nreclaimed; -} - -/* - * The kmem_shake interface is invoked when memory is running low. - */ -/* ARGSUSED */ -STATIC int -xfs_qm_shake( - struct shrinker *shrink, - struct shrink_control *sc) -{ - int ndqused, nfree, n; - gfp_t gfp_mask = sc->gfp_mask; - - if (!kmem_shake_allow(gfp_mask)) - return 0; - if (!xfs_Gqm) - return 0; - - nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ - /* incore dquots in all f/s's */ - ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; - - ASSERT(ndqused >= 0); - - if (nfree <= ndqused && nfree < ndquot) - return 0; - - ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ - n = nfree - ndqused - ndquot; /* # over target */ - - return xfs_qm_shake_freelist(MAX(nfree, n)); -} - - -/*------------------------------------------------------------------*/ - -/* - * Return a new incore dquot. Depending on the number of - * dquots in the system, we either allocate a new one on the kernel heap, - * or reclaim a free one. - * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed - * to reclaim an existing one from the freelist. - */ -boolean_t -xfs_qm_dqalloc_incore( - xfs_dquot_t **O_dqpp) -{ - xfs_dquot_t *dqp; - - /* - * Check against high water mark to see if we want to pop - * a nincompoop dquot off the freelist. - */ - if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { - /* - * Try to recycle a dquot from the freelist. - */ - if ((dqp = xfs_qm_dqreclaim_one())) { - XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); - /* - * Just zero the core here. The rest will get - * reinitialized by caller. XXX we shouldn't even - * do this zero ... - */ - memset(&dqp->q_core, 0, sizeof(dqp->q_core)); - *O_dqpp = dqp; - return B_FALSE; - } - XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); - } - - /* - * Allocate a brand new dquot on the kernel heap and return it - * to the caller to initialize. - */ - ASSERT(xfs_Gqm->qm_dqzone != NULL); - *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); - atomic_inc(&xfs_Gqm->qm_totaldquots); - - return B_TRUE; -} - - -/* - * Start a transaction and write the incore superblock changes to - * disk. flags parameter indicates which fields have changed. - */ -int -xfs_qm_write_sb_changes( - xfs_mount_t *mp, - __int64_t flags) -{ - xfs_trans_t *tp; - int error; - - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); - if ((error = xfs_trans_reserve(tp, 0, - mp->m_sb.sb_sectsize + 128, 0, - 0, - XFS_DEFAULT_LOG_COUNT))) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_mod_sb(tp, flags); - error = xfs_trans_commit(tp, 0); - - return error; -} - - -/* --------------- utility functions for vnodeops ---------------- */ - - -/* - * Given an inode, a uid, gid and prid make sure that we have - * allocated relevant dquot(s) on disk, and that we won't exceed inode - * quotas by creating this file. - * This also attaches dquot(s) to the given inode after locking it, - * and returns the dquots corresponding to the uid and/or gid. 
- * - * in : inode (unlocked) - * out : udquot, gdquot with references taken and unlocked - */ -int -xfs_qm_vop_dqalloc( - struct xfs_inode *ip, - uid_t uid, - gid_t gid, - prid_t prid, - uint flags, - struct xfs_dquot **O_udqpp, - struct xfs_dquot **O_gdqpp) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_dquot *uq, *gq; - int error; - uint lockflags; - - if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) - return 0; - - lockflags = XFS_ILOCK_EXCL; - xfs_ilock(ip, lockflags); - - if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) - gid = ip->i_d.di_gid; - - /* - * Attach the dquot(s) to this inode, doing a dquot allocation - * if necessary. The dquot(s) will not be locked. - */ - if (XFS_NOT_DQATTACHED(mp, ip)) { - error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); - if (error) { - xfs_iunlock(ip, lockflags); - return error; - } - } - - uq = gq = NULL; - if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { - if (ip->i_d.di_uid != uid) { - /* - * What we need is the dquot that has this uid, and - * if we send the inode to dqget, the uid of the inode - * takes priority over what's sent in the uid argument. - * We must unlock inode here before calling dqget if - * we're not sending the inode, because otherwise - * we'll deadlock by doing trans_reserve while - * holding ilock. - */ - xfs_iunlock(ip, lockflags); - if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, - XFS_DQ_USER, - XFS_QMOPT_DQALLOC | - XFS_QMOPT_DOWARN, - &uq))) { - ASSERT(error != ENOENT); - return error; - } - /* - * Get the ilock in the right order. - */ - xfs_dqunlock(uq); - lockflags = XFS_ILOCK_SHARED; - xfs_ilock(ip, lockflags); - } else { - /* - * Take an extra reference, because we'll return - * this to caller - */ - ASSERT(ip->i_udquot); - uq = ip->i_udquot; - xfs_dqlock(uq); - XFS_DQHOLD(uq); - xfs_dqunlock(uq); - } - } - if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { - if (ip->i_d.di_gid != gid) { - xfs_iunlock(ip, lockflags); - if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, - XFS_DQ_GROUP, - XFS_QMOPT_DQALLOC | - XFS_QMOPT_DOWARN, - &gq))) { - if (uq) - xfs_qm_dqrele(uq); - ASSERT(error != ENOENT); - return error; - } - xfs_dqunlock(gq); - lockflags = XFS_ILOCK_SHARED; - xfs_ilock(ip, lockflags); - } else { - ASSERT(ip->i_gdquot); - gq = ip->i_gdquot; - xfs_dqlock(gq); - XFS_DQHOLD(gq); - xfs_dqunlock(gq); - } - } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { - if (xfs_get_projid(ip) != prid) { - xfs_iunlock(ip, lockflags); - if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, - XFS_DQ_PROJ, - XFS_QMOPT_DQALLOC | - XFS_QMOPT_DOWARN, - &gq))) { - if (uq) - xfs_qm_dqrele(uq); - ASSERT(error != ENOENT); - return (error); - } - xfs_dqunlock(gq); - lockflags = XFS_ILOCK_SHARED; - xfs_ilock(ip, lockflags); - } else { - ASSERT(ip->i_gdquot); - gq = ip->i_gdquot; - xfs_dqlock(gq); - XFS_DQHOLD(gq); - xfs_dqunlock(gq); - } - } - if (uq) - trace_xfs_dquot_dqalloc(ip); - - xfs_iunlock(ip, lockflags); - if (O_udqpp) - *O_udqpp = uq; - else if (uq) - xfs_qm_dqrele(uq); - if (O_gdqpp) - *O_gdqpp = gq; - else if (gq) - xfs_qm_dqrele(gq); - return 0; -} - -/* - * Actually transfer ownership, and do dquot modifications. - * These were already reserved. - */ -xfs_dquot_t * -xfs_qm_vop_chown( - xfs_trans_t *tp, - xfs_inode_t *ip, - xfs_dquot_t **IO_olddq, - xfs_dquot_t *newdq) -{ - xfs_dquot_t *prevdq; - uint bfield = XFS_IS_REALTIME_INODE(ip) ? 
- XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; - - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); - - /* old dquot */ - prevdq = *IO_olddq; - ASSERT(prevdq); - ASSERT(prevdq != newdq); - - xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); - xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1); - - /* the sparkling new dquot */ - xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); - xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); - - /* - * Take an extra reference, because the inode - * is going to keep this dquot pointer even - * after the trans_commit. - */ - xfs_dqlock(newdq); - XFS_DQHOLD(newdq); - xfs_dqunlock(newdq); - *IO_olddq = newdq; - - return prevdq; -} - -/* - * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID). - */ -int -xfs_qm_vop_chown_reserve( - xfs_trans_t *tp, - xfs_inode_t *ip, - xfs_dquot_t *udqp, - xfs_dquot_t *gdqp, - uint flags) -{ - xfs_mount_t *mp = ip->i_mount; - uint delblks, blkflags, prjflags = 0; - xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; - int error; - - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - - delblks = ip->i_delayed_blks; - delblksudq = delblksgdq = unresudq = unresgdq = NULL; - blkflags = XFS_IS_REALTIME_INODE(ip) ? - XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; - - if (XFS_IS_UQUOTA_ON(mp) && udqp && - ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) { - delblksudq = udqp; - /* - * If there are delayed allocation blocks, then we have to - * unreserve those from the old dquot, and add them to the - * new dquot. - */ - if (delblks) { - ASSERT(ip->i_udquot); - unresudq = ip->i_udquot; - } - } - if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { - if (XFS_IS_PQUOTA_ON(ip->i_mount) && - xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id)) - prjflags = XFS_QMOPT_ENOSPC; - - if (prjflags || - (XFS_IS_GQUOTA_ON(ip->i_mount) && - ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) { - delblksgdq = gdqp; - if (delblks) { - ASSERT(ip->i_gdquot); - unresgdq = ip->i_gdquot; - } - } - } - - if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, - delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, - flags | blkflags | prjflags))) - return (error); - - /* - * Do the delayed blks reservations/unreservations now. Since, these - * are done without the help of a transaction, if a reservation fails - * its previous reservations won't be automatically undone by trans - * code. So, we have to do it manually here. - */ - if (delblks) { - /* - * Do the reservations first. Unreservation can't fail. - */ - ASSERT(delblksudq || delblksgdq); - ASSERT(unresudq || unresgdq); - if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, - delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, - flags | blkflags | prjflags))) - return (error); - xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, - unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, - blkflags); - } - - return (0); -} - -int -xfs_qm_vop_rename_dqattach( - struct xfs_inode **i_tab) -{ - struct xfs_mount *mp = i_tab[0]->i_mount; - int i; - - if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) - return 0; - - for (i = 0; (i < 4 && i_tab[i]); i++) { - struct xfs_inode *ip = i_tab[i]; - int error; - - /* - * Watch out for duplicate entries in the table. 
- */ - if (i == 0 || ip != i_tab[i-1]) { - if (XFS_NOT_DQATTACHED(mp, ip)) { - error = xfs_qm_dqattach(ip, 0); - if (error) - return error; - } - } - } - return 0; -} - -void -xfs_qm_vop_create_dqattach( - struct xfs_trans *tp, - struct xfs_inode *ip, - struct xfs_dquot *udqp, - struct xfs_dquot *gdqp) -{ - struct xfs_mount *mp = tp->t_mountp; - - if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) - return; - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - - if (udqp) { - xfs_dqlock(udqp); - XFS_DQHOLD(udqp); - xfs_dqunlock(udqp); - ASSERT(ip->i_udquot == NULL); - ip->i_udquot = udqp; - ASSERT(XFS_IS_UQUOTA_ON(mp)); - ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); - xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); - } - if (gdqp) { - xfs_dqlock(gdqp); - XFS_DQHOLD(gdqp); - xfs_dqunlock(gdqp); - ASSERT(ip->i_gdquot == NULL); - ip->i_gdquot = gdqp; - ASSERT(XFS_IS_OQUOTA_ON(mp)); - ASSERT((XFS_IS_GQUOTA_ON(mp) ? - ip->i_d.di_gid : xfs_get_projid(ip)) == - be32_to_cpu(gdqp->q_core.d_id)); - xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); - } -} - diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h deleted file mode 100644 index 43b9abe..0000000 --- a/fs/xfs/quota/xfs_qm.h +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_QM_H__ -#define __XFS_QM_H__ - -#include "xfs_dquot_item.h" -#include "xfs_dquot.h" -#include "xfs_quota_priv.h" -#include "xfs_qm_stats.h" - -struct xfs_qm; -struct xfs_inode; - -extern uint ndquot; -extern struct mutex xfs_Gqm_lock; -extern struct xfs_qm *xfs_Gqm; -extern kmem_zone_t *qm_dqzone; -extern kmem_zone_t *qm_dqtrxzone; - -/* - * Used in xfs_qm_sync called by xfs_sync to count the max times that it can - * iterate over the mountpt's dquot list in one call. - */ -#define XFS_QM_SYNC_MAX_RESTARTS 7 - -/* - * Ditto, for xfs_qm_dqreclaim_one. - */ -#define XFS_QM_RECLAIM_MAX_RESTARTS 4 - -/* - * Ideal ratio of free to in use dquots. Quota manager makes an attempt - * to keep this balance. - */ -#define XFS_QM_DQFREE_RATIO 2 - -/* - * Dquot hashtable constants/threshold values. - */ -#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) -#define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t)) - -/* - * This defines the unit of allocation of dquots. - * Currently, it is just one file system block, and a 4K blk contains 30 - * (136 * 30 = 4080) dquots. It's probably not worth trying to make - * this more dynamic. - * XXXsup However, if this number is changed, we have to make sure that we don't - * implicitly assume that we do allocations in chunks of a single filesystem - * block in the dquot/xqm code. - */ -#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 - -typedef xfs_dqhash_t xfs_dqlist_t; - -/* - * Quota Manager (global) structure. Lives only in core. 
- */ -typedef struct xfs_qm { - xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ - xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ - uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ - struct list_head qm_dqfrlist; /* freelist of dquots */ - struct mutex qm_dqfrlist_lock; - int qm_dqfrlist_cnt; - atomic_t qm_totaldquots; /* total incore dquots */ - uint qm_nrefs; /* file systems with quota on */ - int qm_dqfree_ratio;/* ratio of free to inuse dquots */ - kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ - kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ -} xfs_qm_t; - -/* - * Various quota information for individual filesystems. - * The mount structure keeps a pointer to this. - */ -typedef struct xfs_quotainfo { - xfs_inode_t *qi_uquotaip; /* user quota inode */ - xfs_inode_t *qi_gquotaip; /* group quota inode */ - struct list_head qi_dqlist; /* all dquots in filesys */ - struct mutex qi_dqlist_lock; - int qi_dquots; - int qi_dqreclaims; /* a change here indicates - a removal in the dqlist */ - time_t qi_btimelimit; /* limit for blks timer */ - time_t qi_itimelimit; /* limit for inodes timer */ - time_t qi_rtbtimelimit;/* limit for rt blks timer */ - xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */ - xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */ - xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */ - struct mutex qi_quotaofflock;/* to serialize quotaoff */ - xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ - uint qi_dqperchunk; /* # ondisk dqs in above chunk */ - xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */ - xfs_qcnt_t qi_bsoftlimit; /* default data blk soft limit */ - xfs_qcnt_t qi_ihardlimit; /* default inode count hard limit */ - xfs_qcnt_t qi_isoftlimit; /* default inode count soft limit */ - xfs_qcnt_t qi_rtbhardlimit;/* default realtime blk hard limit */ - xfs_qcnt_t qi_rtbsoftlimit;/* default realtime blk soft limit */ -} xfs_quotainfo_t; - - -extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); -extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, - xfs_dquot_t *, xfs_dquot_t *, long, long, uint); -extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *); -extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *); - -/* - * We keep the usr and grp dquots separately so that locking will be easier - * to do at commit time. All transactions that we know of at this point - * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value. - */ -#define XFS_QM_TRANS_MAXDQS 2 -typedef struct xfs_dquot_acct { - xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS]; - xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS]; -} xfs_dquot_acct_t; - -/* - * Users are allowed to have a usage exceeding their softlimit for - * a period this long. 
- */ -#define XFS_QM_BTIMELIMIT (7 * 24*60*60) /* 1 week */ -#define XFS_QM_RTBTIMELIMIT (7 * 24*60*60) /* 1 week */ -#define XFS_QM_ITIMELIMIT (7 * 24*60*60) /* 1 week */ - -#define XFS_QM_BWARNLIMIT 5 -#define XFS_QM_IWARNLIMIT 5 -#define XFS_QM_RTBWARNLIMIT 5 - -extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); -extern int xfs_qm_quotacheck(xfs_mount_t *); -extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); - -/* dquot stuff */ -extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); -extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); -extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); - -/* quota ops */ -extern int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint); -extern int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint, - fs_disk_quota_t *); -extern int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint, - fs_disk_quota_t *); -extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); -extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); -extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); - -#endif /* __XFS_QM_H__ */ diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c deleted file mode 100644 index a0a829a..0000000 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright (c) 2000-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_itable.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_qm.h" - - -STATIC void -xfs_fill_statvfs_from_dquot( - struct kstatfs *statp, - xfs_disk_dquot_t *dp) -{ - __uint64_t limit; - - limit = dp->d_blk_softlimit ? - be64_to_cpu(dp->d_blk_softlimit) : - be64_to_cpu(dp->d_blk_hardlimit); - if (limit && statp->f_blocks > limit) { - statp->f_blocks = limit; - statp->f_bfree = statp->f_bavail = - (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? - (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; - } - - limit = dp->d_ino_softlimit ? - be64_to_cpu(dp->d_ino_softlimit) : - be64_to_cpu(dp->d_ino_hardlimit); - if (limit && statp->f_files > limit) { - statp->f_files = limit; - statp->f_ffree = - (statp->f_files > be64_to_cpu(dp->d_icount)) ? - (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; - } -} - - -/* - * Directory tree accounting is implemented using project quotas, where - * the project identifier is inherited from parent directories. - * A statvfs (df, etc.) of a directory that is using project quota should - * return a statvfs of the project, not the entire filesystem. 
- * This makes such trees appear as if they are filesystems in themselves. - */ -void -xfs_qm_statvfs( - xfs_inode_t *ip, - struct kstatfs *statp) -{ - xfs_mount_t *mp = ip->i_mount; - xfs_dquot_t *dqp; - - if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) { - xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); - xfs_qm_dqput(dqp); - } -} - -int -xfs_qm_newmount( - xfs_mount_t *mp, - uint *needquotamount, - uint *quotaflags) -{ - uint quotaondisk; - uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; - - quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && - (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); - - if (quotaondisk) { - uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT; - pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT; - gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT; - } - - /* - * If the device itself is read-only, we can't allow - * the user to change the state of quota on the mount - - * this would generate a transaction on the ro device, - * which would lead to an I/O error and shutdown - */ - - if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) || - (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) || - (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) || - (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) || - (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || - (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) && - xfs_dev_is_read_only(mp, "changing quota state")) { - xfs_warn(mp, "please mount with%s%s%s%s.", - (!quotaondisk ? "out quota" : ""), - (uquotaondisk ? " usrquota" : ""), - (pquotaondisk ? " prjquota" : ""), - (gquotaondisk ? " grpquota" : "")); - return XFS_ERROR(EPERM); - } - - if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { - /* - * Call mount_quotas at this point only if we won't have to do - * a quotacheck. - */ - if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { - /* - * If an error occurred, qm_mount_quotas code - * has already disabled quotas. So, just finish - * mounting, and get on with the boring life - * without disk quotas. - */ - xfs_qm_mount_quotas(mp); - } else { - /* - * Clear the quota flags, but remember them. This - * is so that the quota code doesn't get invoked - * before we're ready. This can happen when an - * inode goes inactive and wants to free blocks, - * or via xfs_log_mount_finish. - */ - *needquotamount = B_TRUE; - *quotaflags = mp->m_qflags; - mp->m_qflags = 0; - } - } - - return 0; -} - -void __init -xfs_qm_init(void) -{ - printk(KERN_INFO "SGI XFS Quota Management subsystem\n"); - mutex_init(&xfs_Gqm_lock); - xfs_qm_init_procfs(); -} - -void __exit -xfs_qm_exit(void) -{ - xfs_qm_cleanup_procfs(); - if (qm_dqzone) - kmem_zone_destroy(qm_dqzone); - if (qm_dqtrxzone) - kmem_zone_destroy(qm_dqtrxzone); -} diff --git a/fs/xfs/quota/xfs_qm_stats.c b/fs/xfs/quota/xfs_qm_stats.c deleted file mode 100644 index 8671a0b..0000000 --- a/fs/xfs/quota/xfs_qm_stats.c +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2000-2003 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_itable.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_qm.h" - -struct xqmstats xqmstats; - -static int xqm_proc_show(struct seq_file *m, void *v) -{ - /* maximum; incore; ratio free to inuse; freelist */ - seq_printf(m, "%d\t%d\t%d\t%u\n", - ndquot, - xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, - xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, - xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); - return 0; -} - -static int xqm_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, xqm_proc_show, NULL); -} - -static const struct file_operations xqm_proc_fops = { - .owner = THIS_MODULE, - .open = xqm_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int xqmstat_proc_show(struct seq_file *m, void *v) -{ - /* quota performance statistics */ - seq_printf(m, "qm %u %u %u %u %u %u %u %u\n", - xqmstats.xs_qm_dqreclaims, - xqmstats.xs_qm_dqreclaim_misses, - xqmstats.xs_qm_dquot_dups, - xqmstats.xs_qm_dqcachemisses, - xqmstats.xs_qm_dqcachehits, - xqmstats.xs_qm_dqwants, - xqmstats.xs_qm_dqshake_reclaims, - xqmstats.xs_qm_dqinact_reclaims); - return 0; -} - -static int xqmstat_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, xqmstat_proc_show, NULL); -} - -static const struct file_operations xqmstat_proc_fops = { - .owner = THIS_MODULE, - .open = xqmstat_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void -xfs_qm_init_procfs(void) -{ - proc_create("fs/xfs/xqmstat", 0, NULL, &xqmstat_proc_fops); - proc_create("fs/xfs/xqm", 0, NULL, &xqm_proc_fops); -} - -void -xfs_qm_cleanup_procfs(void) -{ - remove_proc_entry("fs/xfs/xqm", NULL); - remove_proc_entry("fs/xfs/xqmstat", NULL); -} diff --git a/fs/xfs/quota/xfs_qm_stats.h b/fs/xfs/quota/xfs_qm_stats.h deleted file mode 100644 index 5b964fc..0000000 --- a/fs/xfs/quota/xfs_qm_stats.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2002 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_QM_STATS_H__ -#define __XFS_QM_STATS_H__ - -#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) - -/* - * XQM global statistics - */ -struct xqmstats { - __uint32_t xs_qm_dqreclaims; - __uint32_t xs_qm_dqreclaim_misses; - __uint32_t xs_qm_dquot_dups; - __uint32_t xs_qm_dqcachemisses; - __uint32_t xs_qm_dqcachehits; - __uint32_t xs_qm_dqwants; - __uint32_t xs_qm_dqshake_reclaims; - __uint32_t xs_qm_dqinact_reclaims; -}; - -extern struct xqmstats xqmstats; - -# define XQM_STATS_INC(count) ( (count)++ ) - -extern void xfs_qm_init_procfs(void); -extern void xfs_qm_cleanup_procfs(void); - -#else - -# define XQM_STATS_INC(count) do { } while (0) - -static inline void xfs_qm_init_procfs(void) { }; -static inline void xfs_qm_cleanup_procfs(void) { }; - -#endif - -#endif /* __XFS_QM_STATS_H__ */ diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c deleted file mode 100644 index 609246f..0000000 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ /dev/null @@ -1,906 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include - -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_itable.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_utils.h" -#include "xfs_qm.h" -#include "xfs_trace.h" - -STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); -STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, - uint); -STATIC uint xfs_qm_export_flags(uint); -STATIC uint xfs_qm_export_qtype_flags(uint); -STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *, - fs_disk_quota_t *); - - -/* - * Turn off quota accounting and/or enforcement for all udquots and/or - * gdquots. Called only at unmount time. - * - * This assumes that there are no dquots of this file system cached - * incore, and modifies the ondisk dquot directly. Therefore, for example, - * it is an error to call this twice, without purging the cache. - */ -int -xfs_qm_scall_quotaoff( - xfs_mount_t *mp, - uint flags) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - uint dqtype; - int error; - uint inactivate_flags; - xfs_qoff_logitem_t *qoffstart; - int nculprits; - - /* - * No file system can have quotas enabled on disk but not in core. - * Note that quota utilities (like quotaoff) _expect_ - * errno == EEXIST here. 
- */ - if ((mp->m_qflags & flags) == 0) - return XFS_ERROR(EEXIST); - error = 0; - - flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); - - /* - * We don't want to deal with two quotaoffs messing up each other, - * so we're going to serialize it. quotaoff isn't exactly a performance - * critical thing. - * If quotaoff, then we must be dealing with the root filesystem. - */ - ASSERT(q); - mutex_lock(&q->qi_quotaofflock); - - /* - * If we're just turning off quota enforcement, change mp and go. - */ - if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { - mp->m_qflags &= ~(flags); - - spin_lock(&mp->m_sb_lock); - mp->m_sb.sb_qflags = mp->m_qflags; - spin_unlock(&mp->m_sb_lock); - mutex_unlock(&q->qi_quotaofflock); - - /* XXX what to do if error ? Revert back to old vals incore ? */ - error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); - return (error); - } - - dqtype = 0; - inactivate_flags = 0; - /* - * If accounting is off, we must turn enforcement off, clear the - * quota 'CHKD' certificate to make it known that we have to - * do a quotacheck the next time this quota is turned on. - */ - if (flags & XFS_UQUOTA_ACCT) { - dqtype |= XFS_QMOPT_UQUOTA; - flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); - inactivate_flags |= XFS_UQUOTA_ACTIVE; - } - if (flags & XFS_GQUOTA_ACCT) { - dqtype |= XFS_QMOPT_GQUOTA; - flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); - inactivate_flags |= XFS_GQUOTA_ACTIVE; - } else if (flags & XFS_PQUOTA_ACCT) { - dqtype |= XFS_QMOPT_PQUOTA; - flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); - inactivate_flags |= XFS_PQUOTA_ACTIVE; - } - - /* - * Nothing to do? Don't complain. This happens when we're just - * turning off quota enforcement. - */ - if ((mp->m_qflags & flags) == 0) - goto out_unlock; - - /* - * Write the LI_QUOTAOFF log record, and do SB changes atomically, - * and synchronously. If we fail to write, we should abort the - * operation as it cannot be recovered safely if we crash. - */ - error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); - if (error) - goto out_unlock; - - /* - * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct - * to take care of the race between dqget and quotaoff. We don't take - * any special locks to reset these bits. All processes need to check - * these bits *after* taking inode lock(s) to see if the particular - * quota type is in the process of being turned off. If *ACTIVE, it is - * guaranteed that all dquot structures and all quotainode ptrs will all - * stay valid as long as that inode is kept locked. - * - * There is no turning back after this. - */ - mp->m_qflags &= ~inactivate_flags; - - /* - * Give back all the dquot reference(s) held by inodes. - * Here we go thru every single incore inode in this file system, and - * do a dqrele on the i_udquot/i_gdquot that it may have. - * Essentially, as long as somebody has an inode locked, this guarantees - * that quotas will not be turned off. This is handy because in a - * transaction once we lock the inode(s) and check for quotaon, we can - * depend on the quota inodes (and other things) being valid as long as - * we keep the lock(s). - */ - xfs_qm_dqrele_all_inodes(mp, flags); - - /* - * Next we make the changes in the quota flag in the mount struct. - * This isn't protected by a particular lock directly, because we - * don't want to take a mrlock every time we depend on quotas being on. - */ - mp->m_qflags &= ~(flags); - - /* - * Go through all the dquots of this file system and purge them, - * according to what was turned off. 
We may not be able to get rid - * of all dquots, because dquots can have temporary references that - * are not attached to inodes. eg. xfs_setattr, xfs_create. - * So, if we couldn't purge all the dquots from the filesystem, - * we can't get rid of the incore data structures. - */ - while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype))) - delay(10 * nculprits); - - /* - * Transactions that had started before ACTIVE state bit was cleared - * could have logged many dquots, so they'd have higher LSNs than - * the first QUOTAOFF log record does. If we happen to crash when - * the tail of the log has gone past the QUOTAOFF record, but - * before the last dquot modification, those dquots __will__ - * recover, and that's not good. - * - * So, we have QUOTAOFF start and end logitems; the start - * logitem won't get overwritten until the end logitem appears... - */ - error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags); - if (error) { - /* We're screwed now. Shutdown is the only option. */ - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - goto out_unlock; - } - - /* - * If quotas is completely disabled, close shop. - */ - if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) || - ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) { - mutex_unlock(&q->qi_quotaofflock); - xfs_qm_destroy_quotainfo(mp); - return (0); - } - - /* - * Release our quotainode references if we don't need them anymore. - */ - if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { - IRELE(q->qi_uquotaip); - q->qi_uquotaip = NULL; - } - if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) { - IRELE(q->qi_gquotaip); - q->qi_gquotaip = NULL; - } - -out_unlock: - mutex_unlock(&q->qi_quotaofflock); - return error; -} - -STATIC int -xfs_qm_scall_trunc_qfile( - struct xfs_mount *mp, - xfs_ino_t ino) -{ - struct xfs_inode *ip; - struct xfs_trans *tp; - int error; - - if (ino == NULLFSINO) - return 0; - - error = xfs_iget(mp, NULL, ino, 0, 0, &ip); - if (error) - return error; - - xfs_ilock(ip, XFS_IOLOCK_EXCL); - - tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); - error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, - XFS_TRANS_PERM_LOG_RES, - XFS_ITRUNCATE_LOG_COUNT); - if (error) { - xfs_trans_cancel(tp, 0); - xfs_iunlock(ip, XFS_IOLOCK_EXCL); - goto out_put; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip); - - error = xfs_itruncate_data(&tp, ip, 0); - if (error) { - xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | - XFS_TRANS_ABORT); - goto out_unlock; - } - - xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); - -out_unlock: - xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); -out_put: - IRELE(ip); - return error; -} - -int -xfs_qm_scall_trunc_qfiles( - xfs_mount_t *mp, - uint flags) -{ - int error = 0, error2 = 0; - - if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { - xfs_debug(mp, "%s: flags=%x m_qflags=%x\n", - __func__, flags, mp->m_qflags); - return XFS_ERROR(EINVAL); - } - - if (flags & XFS_DQ_USER) - error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); - if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) - error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); - - return error ? error : error2; -} - -/* - * Switch on (a given) quota enforcement for a filesystem. This takes - * effect immediately. - * (Switching on quota accounting must be done at mount time.) 
- */ -int -xfs_qm_scall_quotaon( - xfs_mount_t *mp, - uint flags) -{ - int error; - uint qf; - __int64_t sbflags; - - flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); - /* - * Switching on quota accounting must be done at mount time. - */ - flags &= ~(XFS_ALL_QUOTA_ACCT); - - sbflags = 0; - - if (flags == 0) { - xfs_debug(mp, "%s: zero flags, m_qflags=%x\n", - __func__, mp->m_qflags); - return XFS_ERROR(EINVAL); - } - - /* No fs can turn on quotas with a delayed effect */ - ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); - - /* - * Can't enforce without accounting. We check the superblock - * qflags here instead of m_qflags because rootfs can have - * quota acct on ondisk without m_qflags' knowing. - */ - if (((flags & XFS_UQUOTA_ACCT) == 0 && - (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && - (flags & XFS_UQUOTA_ENFD)) - || - ((flags & XFS_PQUOTA_ACCT) == 0 && - (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && - (flags & XFS_GQUOTA_ACCT) == 0 && - (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && - (flags & XFS_OQUOTA_ENFD))) { - xfs_debug(mp, - "%s: Can't enforce without acct, flags=%x sbflags=%x\n", - __func__, flags, mp->m_sb.sb_qflags); - return XFS_ERROR(EINVAL); - } - /* - * If everything's up to-date incore, then don't waste time. - */ - if ((mp->m_qflags & flags) == flags) - return XFS_ERROR(EEXIST); - - /* - * Change sb_qflags on disk but not incore mp->qflags - * if this is the root filesystem. - */ - spin_lock(&mp->m_sb_lock); - qf = mp->m_sb.sb_qflags; - mp->m_sb.sb_qflags = qf | flags; - spin_unlock(&mp->m_sb_lock); - - /* - * There's nothing to change if it's the same. - */ - if ((qf & flags) == flags && sbflags == 0) - return XFS_ERROR(EEXIST); - sbflags |= XFS_SB_QFLAGS; - - if ((error = xfs_qm_write_sb_changes(mp, sbflags))) - return (error); - /* - * If we aren't trying to switch on quota enforcement, we are done. - */ - if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != - (mp->m_qflags & XFS_UQUOTA_ACCT)) || - ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != - (mp->m_qflags & XFS_PQUOTA_ACCT)) || - ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != - (mp->m_qflags & XFS_GQUOTA_ACCT)) || - (flags & XFS_ALL_QUOTA_ENFD) == 0) - return (0); - - if (! XFS_IS_QUOTA_RUNNING(mp)) - return XFS_ERROR(ESRCH); - - /* - * Switch on quota enforcement in core. - */ - mutex_lock(&mp->m_quotainfo->qi_quotaofflock); - mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); - mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); - - return (0); -} - - -/* - * Return quota status information, such as uquota-off, enforcements, etc. 
- */ -int -xfs_qm_scall_getqstat( - struct xfs_mount *mp, - struct fs_quota_stat *out) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_inode *uip, *gip; - boolean_t tempuqip, tempgqip; - - uip = gip = NULL; - tempuqip = tempgqip = B_FALSE; - memset(out, 0, sizeof(fs_quota_stat_t)); - - out->qs_version = FS_QSTAT_VERSION; - if (!xfs_sb_version_hasquota(&mp->m_sb)) { - out->qs_uquota.qfs_ino = NULLFSINO; - out->qs_gquota.qfs_ino = NULLFSINO; - return (0); - } - out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & - (XFS_ALL_QUOTA_ACCT| - XFS_ALL_QUOTA_ENFD)); - out->qs_pad = 0; - out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; - out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; - - if (q) { - uip = q->qi_uquotaip; - gip = q->qi_gquotaip; - } - if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, - 0, 0, &uip) == 0) - tempuqip = B_TRUE; - } - if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { - if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, - 0, 0, &gip) == 0) - tempgqip = B_TRUE; - } - if (uip) { - out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; - out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; - if (tempuqip) - IRELE(uip); - } - if (gip) { - out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; - out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; - if (tempgqip) - IRELE(gip); - } - if (q) { - out->qs_incoredqs = q->qi_dquots; - out->qs_btimelimit = q->qi_btimelimit; - out->qs_itimelimit = q->qi_itimelimit; - out->qs_rtbtimelimit = q->qi_rtbtimelimit; - out->qs_bwarnlimit = q->qi_bwarnlimit; - out->qs_iwarnlimit = q->qi_iwarnlimit; - } - return 0; -} - -#define XFS_DQ_MASK \ - (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) - -/* - * Adjust quota limits, and start/stop timers accordingly. - */ -int -xfs_qm_scall_setqlim( - xfs_mount_t *mp, - xfs_dqid_t id, - uint type, - fs_disk_quota_t *newlim) -{ - struct xfs_quotainfo *q = mp->m_quotainfo; - xfs_disk_dquot_t *ddq; - xfs_dquot_t *dqp; - xfs_trans_t *tp; - int error; - xfs_qcnt_t hard, soft; - - if (newlim->d_fieldmask & ~XFS_DQ_MASK) - return EINVAL; - if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) - return 0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); - if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, - 0, 0, XFS_DEFAULT_LOG_COUNT))) { - xfs_trans_cancel(tp, 0); - return (error); - } - - /* - * We don't want to race with a quotaoff so take the quotaoff lock. - * (We don't hold an inode lock, so there's nothing else to stop - * a quotaoff from happening). (XXXThis doesn't currently happen - * because we take the vfslock before calling xfs_qm_sysent). - */ - mutex_lock(&q->qi_quotaofflock); - - /* - * Get the dquot (locked), and join it to the transaction. - * Allocate the dquot if this doesn't exist. - */ - if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { - xfs_trans_cancel(tp, XFS_TRANS_ABORT); - ASSERT(error != ENOENT); - goto out_unlock; - } - xfs_trans_dqjoin(tp, dqp); - ddq = &dqp->q_core; - - /* - * Make sure that hardlimits are >= soft limits before changing. - */ - hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : - be64_to_cpu(ddq->d_blk_hardlimit); - soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? 
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : - be64_to_cpu(ddq->d_blk_softlimit); - if (hard == 0 || hard >= soft) { - ddq->d_blk_hardlimit = cpu_to_be64(hard); - ddq->d_blk_softlimit = cpu_to_be64(soft); - if (id == 0) { - q->qi_bhardlimit = hard; - q->qi_bsoftlimit = soft; - } - } else { - xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft); - } - hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : - be64_to_cpu(ddq->d_rtb_hardlimit); - soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? - (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : - be64_to_cpu(ddq->d_rtb_softlimit); - if (hard == 0 || hard >= soft) { - ddq->d_rtb_hardlimit = cpu_to_be64(hard); - ddq->d_rtb_softlimit = cpu_to_be64(soft); - if (id == 0) { - q->qi_rtbhardlimit = hard; - q->qi_rtbsoftlimit = soft; - } - } else { - xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft); - } - - hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? - (xfs_qcnt_t) newlim->d_ino_hardlimit : - be64_to_cpu(ddq->d_ino_hardlimit); - soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? - (xfs_qcnt_t) newlim->d_ino_softlimit : - be64_to_cpu(ddq->d_ino_softlimit); - if (hard == 0 || hard >= soft) { - ddq->d_ino_hardlimit = cpu_to_be64(hard); - ddq->d_ino_softlimit = cpu_to_be64(soft); - if (id == 0) { - q->qi_ihardlimit = hard; - q->qi_isoftlimit = soft; - } - } else { - xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft); - } - - /* - * Update warnings counter(s) if requested - */ - if (newlim->d_fieldmask & FS_DQ_BWARNS) - ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); - if (newlim->d_fieldmask & FS_DQ_IWARNS) - ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); - if (newlim->d_fieldmask & FS_DQ_RTBWARNS) - ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); - - if (id == 0) { - /* - * Timelimits for the super user set the relative time - * the other users can be over quota for this file system. - * If it is zero a default is used. Ditto for the default - * soft and hard limit values (already done, above), and - * for warnings. - */ - if (newlim->d_fieldmask & FS_DQ_BTIMER) { - q->qi_btimelimit = newlim->d_btimer; - ddq->d_btimer = cpu_to_be32(newlim->d_btimer); - } - if (newlim->d_fieldmask & FS_DQ_ITIMER) { - q->qi_itimelimit = newlim->d_itimer; - ddq->d_itimer = cpu_to_be32(newlim->d_itimer); - } - if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { - q->qi_rtbtimelimit = newlim->d_rtbtimer; - ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); - } - if (newlim->d_fieldmask & FS_DQ_BWARNS) - q->qi_bwarnlimit = newlim->d_bwarns; - if (newlim->d_fieldmask & FS_DQ_IWARNS) - q->qi_iwarnlimit = newlim->d_iwarns; - if (newlim->d_fieldmask & FS_DQ_RTBWARNS) - q->qi_rtbwarnlimit = newlim->d_rtbwarns; - } else { - /* - * If the user is now over quota, start the timelimit. - * The user will not be 'warned'. - * Note that we keep the timers ticking, whether enforcement - * is on or off. We don't really want to bother with iterating - * over all ondisk dquots and turning the timers on/off. - */ - xfs_qm_adjust_dqtimers(mp, ddq); - } - dqp->dq_flags |= XFS_DQ_DIRTY; - xfs_trans_log_dquot(tp, dqp); - - error = xfs_trans_commit(tp, 0); - xfs_qm_dqrele(dqp); - - out_unlock: - mutex_unlock(&q->qi_quotaofflock); - return error; -} - -int -xfs_qm_scall_getquota( - xfs_mount_t *mp, - xfs_dqid_t id, - uint type, - fs_disk_quota_t *out) -{ - xfs_dquot_t *dqp; - int error; - - /* - * Try to get the dquot. We don't want it allocated on disk, so - * we aren't passing the XFS_QMOPT_DOALLOC flag. 
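The same validation pattern repeats above for blocks, realtime blocks, and inodes: a hard limit of zero means "no limit", and otherwise the hard limit must be at least the soft limit or the new pair is rejected and the old limits stand. A small stand-alone model of that rule; the names are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Apply a new (hard, soft) limit pair only when it is self-consistent. */
static int set_limits(uint64_t hard, uint64_t soft,
                      uint64_t *cur_hard, uint64_t *cur_soft)
{
        if (hard != 0 && hard < soft)
                return -1;      /* rejected, current limits untouched */
        *cur_hard = hard;
        *cur_soft = soft;
        return 0;
}

int main(void)
{
        uint64_t h = 0, s = 0;

        printf("%d\n", set_limits(100, 80, &h, &s));    /* 0: accepted */
        printf("%d\n", set_limits(50, 80, &h, &s));     /* -1: hard < soft */
        printf("%llu %llu\n",
               (unsigned long long)h, (unsigned long long)s); /* 100 80 */
        return 0;
}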
If it doesn't - * exist, we'll get ENOENT back. - */ - if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) { - return (error); - } - - /* - * If everything's NULL, this dquot doesn't quite exist as far as - * our utility programs are concerned. - */ - if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { - xfs_qm_dqput(dqp); - return XFS_ERROR(ENOENT); - } - /* - * Convert the disk dquot to the exportable format - */ - xfs_qm_export_dquot(mp, &dqp->q_core, out); - xfs_qm_dqput(dqp); - return (error ? XFS_ERROR(EFAULT) : 0); -} - - -STATIC int -xfs_qm_log_quotaoff_end( - xfs_mount_t *mp, - xfs_qoff_logitem_t *startqoff, - uint flags) -{ - xfs_trans_t *tp; - int error; - xfs_qoff_logitem_t *qoffi; - - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); - - if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2, - 0, 0, XFS_DEFAULT_LOG_COUNT))) { - xfs_trans_cancel(tp, 0); - return (error); - } - - qoffi = xfs_trans_get_qoff_item(tp, startqoff, - flags & XFS_ALL_QUOTA_ACCT); - xfs_trans_log_quotaoff_item(tp, qoffi); - - /* - * We have to make sure that the transaction is secure on disk before we - * return and actually stop quota accounting. So, make it synchronous. - * We don't care about quotoff's performance. - */ - xfs_trans_set_sync(tp); - error = xfs_trans_commit(tp, 0); - return (error); -} - - -STATIC int -xfs_qm_log_quotaoff( - xfs_mount_t *mp, - xfs_qoff_logitem_t **qoffstartp, - uint flags) -{ - xfs_trans_t *tp; - int error; - xfs_qoff_logitem_t *qoffi=NULL; - uint oldsbqflag=0; - - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); - if ((error = xfs_trans_reserve(tp, 0, - sizeof(xfs_qoff_logitem_t) * 2 + - mp->m_sb.sb_sectsize + 128, - 0, - 0, - XFS_DEFAULT_LOG_COUNT))) { - goto error0; - } - - qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); - xfs_trans_log_quotaoff_item(tp, qoffi); - - spin_lock(&mp->m_sb_lock); - oldsbqflag = mp->m_sb.sb_qflags; - mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; - spin_unlock(&mp->m_sb_lock); - - xfs_mod_sb(tp, XFS_SB_QFLAGS); - - /* - * We have to make sure that the transaction is secure on disk before we - * return and actually stop quota accounting. So, make it synchronous. - * We don't care about quotoff's performance. - */ - xfs_trans_set_sync(tp); - error = xfs_trans_commit(tp, 0); - -error0: - if (error) { - xfs_trans_cancel(tp, 0); - /* - * No one else is modifying sb_qflags, so this is OK. - * We still hold the quotaofflock. - */ - spin_lock(&mp->m_sb_lock); - mp->m_sb.sb_qflags = oldsbqflag; - spin_unlock(&mp->m_sb_lock); - } - *qoffstartp = qoffi; - return (error); -} - - -/* - * Translate an internal style on-disk-dquot to the exportable format. - * The main differences are that the counters/limits are all in Basic - * Blocks (BBs) instead of the internal FSBs, and all on-disk data has - * to be converted to the native endianness. 
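xfs_qm_log_quotaoff() above clears the quota bits in sb_qflags optimistically, commits synchronously, and puts the saved value back if the commit fails, relying on the quotaofflock to keep other writers out. A toy model of that save, modify, restore pattern, with locking and the real transaction machinery elided; all names are illustrative:

#include <stdio.h>

struct sb { unsigned qflags; };

static int commit(void) { return -1; /* pretend the commit failed */ }

static int log_quotaoff(struct sb *sb, unsigned off_flags)
{
        unsigned old = sb->qflags;
        int error;

        sb->qflags &= ~off_flags;       /* optimistic update */
        error = commit();
        if (error)
                sb->qflags = old;       /* roll back on failure */
        return error;
}

int main(void)
{
        struct sb sb = { .qflags = 0x3 };

        log_quotaoff(&sb, 0x1);
        printf("qflags=%x\n", sb.qflags);       /* 3: restored */
        return 0;
}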
- */ -STATIC void -xfs_qm_export_dquot( - xfs_mount_t *mp, - xfs_disk_dquot_t *src, - struct fs_disk_quota *dst) -{ - memset(dst, 0, sizeof(*dst)); - dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */ - dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags); - dst->d_id = be32_to_cpu(src->d_id); - dst->d_blk_hardlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit)); - dst->d_blk_softlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit)); - dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit); - dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit); - dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount)); - dst->d_icount = be64_to_cpu(src->d_icount); - dst->d_btimer = be32_to_cpu(src->d_btimer); - dst->d_itimer = be32_to_cpu(src->d_itimer); - dst->d_iwarns = be16_to_cpu(src->d_iwarns); - dst->d_bwarns = be16_to_cpu(src->d_bwarns); - dst->d_rtb_hardlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit)); - dst->d_rtb_softlimit = - XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit)); - dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount)); - dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer); - dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns); - - /* - * Internally, we don't reset all the timers when quota enforcement - * gets turned off. No need to confuse the user level code, - * so return zeroes in that case. - */ - if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) || - (!XFS_IS_OQUOTA_ENFORCED(mp) && - (src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { - dst->d_btimer = 0; - dst->d_itimer = 0; - dst->d_rtbtimer = 0; - } - -#ifdef DEBUG - if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || - (XFS_IS_OQUOTA_ENFORCED(mp) && - (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && - dst->d_id != 0) { - if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && - (dst->d_blk_softlimit > 0)) { - ASSERT(dst->d_btimer != 0); - } - if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && - (dst->d_ino_softlimit > 0)) { - ASSERT(dst->d_itimer != 0); - } - } -#endif -} - -STATIC uint -xfs_qm_export_qtype_flags( - uint flags) -{ - /* - * Can't be more than one, or none. - */ - ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != - (FS_PROJ_QUOTA | FS_USER_QUOTA)); - ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != - (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); - ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != - (FS_USER_QUOTA | FS_GROUP_QUOTA)); - ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); - - return (flags & XFS_DQ_USER) ? - FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? - FS_PROJ_QUOTA : FS_GROUP_QUOTA; -} - -STATIC uint -xfs_qm_export_flags( - uint flags) -{ - uint uflags; - - uflags = 0; - if (flags & XFS_UQUOTA_ACCT) - uflags |= FS_QUOTA_UDQ_ACCT; - if (flags & XFS_PQUOTA_ACCT) - uflags |= FS_QUOTA_PDQ_ACCT; - if (flags & XFS_GQUOTA_ACCT) - uflags |= FS_QUOTA_GDQ_ACCT; - if (flags & XFS_UQUOTA_ENFD) - uflags |= FS_QUOTA_UDQ_ENFD; - if (flags & (XFS_OQUOTA_ENFD)) { - uflags |= (flags & XFS_GQUOTA_ACCT) ? 
- FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
- }
- return (uflags);
-}
-
-
-STATIC int
-xfs_dqrele_inode(
- struct xfs_inode *ip,
- struct xfs_perag *pag,
- int flags)
-{
- /* skip quota inodes */
- if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
- ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
- ASSERT(ip->i_udquot == NULL);
- ASSERT(ip->i_gdquot == NULL);
- return 0;
- }
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
- xfs_qm_dqrele(ip->i_udquot);
- ip->i_udquot = NULL;
- }
- if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
- xfs_qm_dqrele(ip->i_gdquot);
- ip->i_gdquot = NULL;
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return 0;
-}
-
-
-/*
- * Go thru all the inodes in the file system, releasing their dquots.
- *
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff.
- */
-void
-xfs_qm_dqrele_all_inodes(
- struct xfs_mount *mp,
- uint flags)
-{
- ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
-}
diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h
deleted file mode 100644
index 94a3d92..0000000
--- a/fs/xfs/quota/xfs_quota_priv.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2000-2003 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_QUOTA_PRIV_H__
-#define __XFS_QUOTA_PRIV_H__
-
-/*
- * Number of bmaps that we ask from bmapi when doing a quotacheck.
- * We make this restriction to keep the memory usage to a minimum.
- */
-#define XFS_DQITER_MAP_SIZE 10
-
-/*
- * Hash into a bucket in the dquot hash table, based on <mp, id>.
- */
-#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
- (__psunsigned_t)(id)) & \
- (xfs_Gqm->qm_dqhashmask - 1))
-#define XFS_DQ_HASH(mp, id, type) (type == XFS_DQ_USER ? \
- (xfs_Gqm->qm_usr_dqhtable + \
- XFS_DQ_HASHVAL(mp, id)) : \
- (xfs_Gqm->qm_grp_dqhtable + \
- XFS_DQ_HASHVAL(mp, id)))
-#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \
- !dqp->q_core.d_blk_hardlimit && \
- !dqp->q_core.d_blk_softlimit && \
- !dqp->q_core.d_rtb_hardlimit && \
- !dqp->q_core.d_rtb_softlimit && \
- !dqp->q_core.d_ino_hardlimit && \
- !dqp->q_core.d_ino_softlimit && \
- !dqp->q_core.d_bcount && \
- !dqp->q_core.d_rtbcount && \
- !dqp->q_core.d_icount)
-
-#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \
- (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \
- (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???")))
-
-#endif /* __XFS_QUOTA_PRIV_H__ */
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
deleted file mode 100644
index 4d00ee6..0000000
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ /dev/null
@@ -1,890 +0,0 @@
-/*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.
- * All Rights Reserved.
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_alloc.h" -#include "xfs_quota.h" -#include "xfs_mount.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" -#include "xfs_itable.h" -#include "xfs_bmap.h" -#include "xfs_rtalloc.h" -#include "xfs_error.h" -#include "xfs_attr.h" -#include "xfs_buf_item.h" -#include "xfs_trans_priv.h" -#include "xfs_qm.h" - -STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *); - -/* - * Add the locked dquot to the transaction. - * The dquot must be locked, and it cannot be associated with any - * transaction. - */ -void -xfs_trans_dqjoin( - xfs_trans_t *tp, - xfs_dquot_t *dqp) -{ - ASSERT(dqp->q_transp != tp); - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - ASSERT(dqp->q_logitem.qli_dquot == dqp); - - /* - * Get a log_item_desc to point at the new item. - */ - xfs_trans_add_item(tp, &dqp->q_logitem.qli_item); - - /* - * Initialize d_transp so we can later determine if this dquot is - * associated with this transaction. - */ - dqp->q_transp = tp; -} - - -/* - * This is called to mark the dquot as needing - * to be logged when the transaction is committed. The dquot must - * already be associated with the given transaction. - * Note that it marks the entire transaction as dirty. In the ordinary - * case, this gets called via xfs_trans_commit, after the transaction - * is already dirty. However, there's nothing stop this from getting - * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY - * flag. - */ -void -xfs_trans_log_dquot( - xfs_trans_t *tp, - xfs_dquot_t *dqp) -{ - ASSERT(dqp->q_transp == tp); - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - - tp->t_flags |= XFS_TRANS_DIRTY; - dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY; -} - -/* - * Carry forward whatever is left of the quota blk reservation to - * the spanky new transaction - */ -void -xfs_trans_dup_dqinfo( - xfs_trans_t *otp, - xfs_trans_t *ntp) -{ - xfs_dqtrx_t *oq, *nq; - int i,j; - xfs_dqtrx_t *oqa, *nqa; - - if (!otp->t_dqinfo) - return; - - xfs_trans_alloc_dqinfo(ntp); - oqa = otp->t_dqinfo->dqa_usrdquots; - nqa = ntp->t_dqinfo->dqa_usrdquots; - - /* - * Because the quota blk reservation is carried forward, - * it is also necessary to carry forward the DQ_DIRTY flag. - */ - if(otp->t_flags & XFS_TRANS_DQ_DIRTY) - ntp->t_flags |= XFS_TRANS_DQ_DIRTY; - - for (j = 0; j < 2; j++) { - for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { - if (oqa[i].qt_dquot == NULL) - break; - oq = &oqa[i]; - nq = &nqa[i]; - - nq->qt_dquot = oq->qt_dquot; - nq->qt_bcount_delta = nq->qt_icount_delta = 0; - nq->qt_rtbcount_delta = 0; - - /* - * Transfer whatever is left of the reservations. 
- */ - nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used; - oq->qt_blk_res = oq->qt_blk_res_used; - - nq->qt_rtblk_res = oq->qt_rtblk_res - - oq->qt_rtblk_res_used; - oq->qt_rtblk_res = oq->qt_rtblk_res_used; - - nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; - oq->qt_ino_res = oq->qt_ino_res_used; - - } - oqa = otp->t_dqinfo->dqa_grpdquots; - nqa = ntp->t_dqinfo->dqa_grpdquots; - } -} - -/* - * Wrap around mod_dquot to account for both user and group quotas. - */ -void -xfs_trans_mod_dquot_byino( - xfs_trans_t *tp, - xfs_inode_t *ip, - uint field, - long delta) -{ - xfs_mount_t *mp = tp->t_mountp; - - if (!XFS_IS_QUOTA_RUNNING(mp) || - !XFS_IS_QUOTA_ON(mp) || - ip->i_ino == mp->m_sb.sb_uquotino || - ip->i_ino == mp->m_sb.sb_gquotino) - return; - - if (tp->t_dqinfo == NULL) - xfs_trans_alloc_dqinfo(tp); - - if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) - (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); - if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot) - (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); -} - -STATIC xfs_dqtrx_t * -xfs_trans_get_dqtrx( - xfs_trans_t *tp, - xfs_dquot_t *dqp) -{ - int i; - xfs_dqtrx_t *qa; - - qa = XFS_QM_ISUDQ(dqp) ? - tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots; - - for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { - if (qa[i].qt_dquot == NULL || - qa[i].qt_dquot == dqp) - return &qa[i]; - } - - return NULL; -} - -/* - * Make the changes in the transaction structure. - * The moral equivalent to xfs_trans_mod_sb(). - * We don't touch any fields in the dquot, so we don't care - * if it's locked or not (most of the time it won't be). - */ -void -xfs_trans_mod_dquot( - xfs_trans_t *tp, - xfs_dquot_t *dqp, - uint field, - long delta) -{ - xfs_dqtrx_t *qtrx; - - ASSERT(tp); - ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); - qtrx = NULL; - - if (tp->t_dqinfo == NULL) - xfs_trans_alloc_dqinfo(tp); - /* - * Find either the first free slot or the slot that belongs - * to this dquot. - */ - qtrx = xfs_trans_get_dqtrx(tp, dqp); - ASSERT(qtrx); - if (qtrx->qt_dquot == NULL) - qtrx->qt_dquot = dqp; - - switch (field) { - - /* - * regular disk blk reservation - */ - case XFS_TRANS_DQ_RES_BLKS: - qtrx->qt_blk_res += (ulong)delta; - break; - - /* - * inode reservation - */ - case XFS_TRANS_DQ_RES_INOS: - qtrx->qt_ino_res += (ulong)delta; - break; - - /* - * disk blocks used. - */ - case XFS_TRANS_DQ_BCOUNT: - if (qtrx->qt_blk_res && delta > 0) { - qtrx->qt_blk_res_used += (ulong)delta; - ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used); - } - qtrx->qt_bcount_delta += delta; - break; - - case XFS_TRANS_DQ_DELBCOUNT: - qtrx->qt_delbcnt_delta += delta; - break; - - /* - * Inode Count - */ - case XFS_TRANS_DQ_ICOUNT: - if (qtrx->qt_ino_res && delta > 0) { - qtrx->qt_ino_res_used += (ulong)delta; - ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); - } - qtrx->qt_icount_delta += delta; - break; - - /* - * rtblk reservation - */ - case XFS_TRANS_DQ_RES_RTBLKS: - qtrx->qt_rtblk_res += (ulong)delta; - break; - - /* - * rtblk count - */ - case XFS_TRANS_DQ_RTBCOUNT: - if (qtrx->qt_rtblk_res && delta > 0) { - qtrx->qt_rtblk_res_used += (ulong)delta; - ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used); - } - qtrx->qt_rtbcount_delta += delta; - break; - - case XFS_TRANS_DQ_DELRTBCOUNT: - qtrx->qt_delrtb_delta += delta; - break; - - default: - ASSERT(0); - } - tp->t_flags |= XFS_TRANS_DQ_DIRTY; -} - - -/* - * Given an array of dqtrx structures, lock all the dquots associated - * and join them to the transaction, provided they have been modified. 
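xfs_trans_mod_dquot() above never touches the dquot itself; it accumulates per-field deltas in the transaction's dqtrx slot, and separately tracks how much of an up-front reservation actual usage has consumed. A reduced sketch of that bookkeeping; the field IDs and struct layout are illustrative, not the kernel's:

#include <stdio.h>

enum { RES_BLKS, BCOUNT, ICOUNT };

struct dqtrx {
        long blk_res;           /* blocks reserved up front */
        long blk_res_used;      /* reserved blocks actually consumed */
        long bcount_delta;      /* net change in block usage */
        long icount_delta;      /* net change in inode usage */
};

static void mod_dquot(struct dqtrx *q, int field, long delta)
{
        switch (field) {
        case RES_BLKS:
                q->blk_res += delta;
                break;
        case BCOUNT:
                if (q->blk_res && delta > 0)
                        q->blk_res_used += delta;
                q->bcount_delta += delta;
                break;
        case ICOUNT:
                q->icount_delta += delta;
                break;
        }
}

int main(void)
{
        struct dqtrx q = { 0 };

        mod_dquot(&q, RES_BLKS, 16);    /* reserve 16 blocks */
        mod_dquot(&q, BCOUNT, 8);       /* consume 8 of them */
        printf("res=%ld used=%ld delta=%ld\n",
               q.blk_res, q.blk_res_used, q.bcount_delta);  /* 16 8 8 */
        return 0;
}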
- * We know that the highest number of dquots (of one type - usr OR grp), - * involved in a transaction is 2 and that both usr and grp combined - 3. - * So, we don't attempt to make this very generic. - */ -STATIC void -xfs_trans_dqlockedjoin( - xfs_trans_t *tp, - xfs_dqtrx_t *q) -{ - ASSERT(q[0].qt_dquot != NULL); - if (q[1].qt_dquot == NULL) { - xfs_dqlock(q[0].qt_dquot); - xfs_trans_dqjoin(tp, q[0].qt_dquot); - } else { - ASSERT(XFS_QM_TRANS_MAXDQS == 2); - xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); - xfs_trans_dqjoin(tp, q[0].qt_dquot); - xfs_trans_dqjoin(tp, q[1].qt_dquot); - } -} - - -/* - * Called by xfs_trans_commit() and similar in spirit to - * xfs_trans_apply_sb_deltas(). - * Go thru all the dquots belonging to this transaction and modify the - * INCORE dquot to reflect the actual usages. - * Unreserve just the reservations done by this transaction. - * dquot is still left locked at exit. - */ -void -xfs_trans_apply_dquot_deltas( - xfs_trans_t *tp) -{ - int i, j; - xfs_dquot_t *dqp; - xfs_dqtrx_t *qtrx, *qa; - xfs_disk_dquot_t *d; - long totalbdelta; - long totalrtbdelta; - - if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY)) - return; - - ASSERT(tp->t_dqinfo); - qa = tp->t_dqinfo->dqa_usrdquots; - for (j = 0; j < 2; j++) { - if (qa[0].qt_dquot == NULL) { - qa = tp->t_dqinfo->dqa_grpdquots; - continue; - } - - /* - * Lock all of the dquots and join them to the transaction. - */ - xfs_trans_dqlockedjoin(tp, qa); - - for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { - qtrx = &qa[i]; - /* - * The array of dquots is filled - * sequentially, not sparsely. - */ - if ((dqp = qtrx->qt_dquot) == NULL) - break; - - ASSERT(XFS_DQ_IS_LOCKED(dqp)); - ASSERT(dqp->q_transp == tp); - - /* - * adjust the actual number of blocks used - */ - d = &dqp->q_core; - - /* - * The issue here is - sometimes we don't make a blkquota - * reservation intentionally to be fair to users - * (when the amount is small). On the other hand, - * delayed allocs do make reservations, but that's - * outside of a transaction, so we have no - * idea how much was really reserved. - * So, here we've accumulated delayed allocation blks and - * non-delay blks. The assumption is that the - * delayed ones are always reserved (outside of a - * transaction), and the others may or may not have - * quota reservations. - */ - totalbdelta = qtrx->qt_bcount_delta + - qtrx->qt_delbcnt_delta; - totalrtbdelta = qtrx->qt_rtbcount_delta + - qtrx->qt_delrtb_delta; -#ifdef DEBUG - if (totalbdelta < 0) - ASSERT(be64_to_cpu(d->d_bcount) >= - -totalbdelta); - - if (totalrtbdelta < 0) - ASSERT(be64_to_cpu(d->d_rtbcount) >= - -totalrtbdelta); - - if (qtrx->qt_icount_delta < 0) - ASSERT(be64_to_cpu(d->d_icount) >= - -qtrx->qt_icount_delta); -#endif - if (totalbdelta) - be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta); - - if (qtrx->qt_icount_delta) - be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); - - if (totalrtbdelta) - be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); - - /* - * Get any default limits in use. - * Start/reset the timer(s) if needed. - */ - if (d->d_id) { - xfs_qm_adjust_dqlimits(tp->t_mountp, d); - xfs_qm_adjust_dqtimers(tp->t_mountp, d); - } - - dqp->dq_flags |= XFS_DQ_DIRTY; - /* - * add this to the list of items to get logged - */ - xfs_trans_log_dquot(tp, dqp); - /* - * Take off what's left of the original reservation. - * In case of delayed allocations, there's no - * reservation that a transaction structure knows of. 
- */ - if (qtrx->qt_blk_res != 0) { - if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { - if (qtrx->qt_blk_res > - qtrx->qt_blk_res_used) - dqp->q_res_bcount -= (xfs_qcnt_t) - (qtrx->qt_blk_res - - qtrx->qt_blk_res_used); - else - dqp->q_res_bcount -= (xfs_qcnt_t) - (qtrx->qt_blk_res_used - - qtrx->qt_blk_res); - } - } else { - /* - * These blks were never reserved, either inside - * a transaction or outside one (in a delayed - * allocation). Also, this isn't always a - * negative number since we sometimes - * deliberately skip quota reservations. - */ - if (qtrx->qt_bcount_delta) { - dqp->q_res_bcount += - (xfs_qcnt_t)qtrx->qt_bcount_delta; - } - } - /* - * Adjust the RT reservation. - */ - if (qtrx->qt_rtblk_res != 0) { - if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) { - if (qtrx->qt_rtblk_res > - qtrx->qt_rtblk_res_used) - dqp->q_res_rtbcount -= (xfs_qcnt_t) - (qtrx->qt_rtblk_res - - qtrx->qt_rtblk_res_used); - else - dqp->q_res_rtbcount -= (xfs_qcnt_t) - (qtrx->qt_rtblk_res_used - - qtrx->qt_rtblk_res); - } - } else { - if (qtrx->qt_rtbcount_delta) - dqp->q_res_rtbcount += - (xfs_qcnt_t)qtrx->qt_rtbcount_delta; - } - - /* - * Adjust the inode reservation. - */ - if (qtrx->qt_ino_res != 0) { - ASSERT(qtrx->qt_ino_res >= - qtrx->qt_ino_res_used); - if (qtrx->qt_ino_res > qtrx->qt_ino_res_used) - dqp->q_res_icount -= (xfs_qcnt_t) - (qtrx->qt_ino_res - - qtrx->qt_ino_res_used); - } else { - if (qtrx->qt_icount_delta) - dqp->q_res_icount += - (xfs_qcnt_t)qtrx->qt_icount_delta; - } - - ASSERT(dqp->q_res_bcount >= - be64_to_cpu(dqp->q_core.d_bcount)); - ASSERT(dqp->q_res_icount >= - be64_to_cpu(dqp->q_core.d_icount)); - ASSERT(dqp->q_res_rtbcount >= - be64_to_cpu(dqp->q_core.d_rtbcount)); - } - /* - * Do the group quotas next - */ - qa = tp->t_dqinfo->dqa_grpdquots; - } -} - -/* - * Release the reservations, and adjust the dquots accordingly. - * This is called only when the transaction is being aborted. If by - * any chance we have done dquot modifications incore (ie. deltas) already, - * we simply throw those away, since that's the expected behavior - * when a transaction is curtailed without a commit. - */ -void -xfs_trans_unreserve_and_mod_dquots( - xfs_trans_t *tp) -{ - int i, j; - xfs_dquot_t *dqp; - xfs_dqtrx_t *qtrx, *qa; - boolean_t locked; - - if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) - return; - - qa = tp->t_dqinfo->dqa_usrdquots; - - for (j = 0; j < 2; j++) { - for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { - qtrx = &qa[i]; - /* - * We assume that the array of dquots is filled - * sequentially, not sparsely. - */ - if ((dqp = qtrx->qt_dquot) == NULL) - break; - /* - * Unreserve the original reservation. We don't care - * about the number of blocks used field, or deltas. - * Also we don't bother to zero the fields. 
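The reservation adjustment above hands back only the unused tail of what the transaction reserved; blocks that were actually consumed stay charged against the dquot's reserved count. A simplified model of the common case, where the reservation is at least as large as the usage; names are illustrative:

#include <stdio.h>

struct dquot { long res_bcount; };

static void give_back_unused(struct dquot *dq, long reserved, long used)
{
        if (reserved > used)
                dq->res_bcount -= reserved - used;
}

int main(void)
{
        struct dquot dq = { .res_bcount = 100 };

        give_back_unused(&dq, 16, 10);  /* 6 unused blocks come back */
        printf("res_bcount=%ld\n", dq.res_bcount);      /* 94 */
        return 0;
}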
- */ - locked = B_FALSE; - if (qtrx->qt_blk_res) { - xfs_dqlock(dqp); - locked = B_TRUE; - dqp->q_res_bcount -= - (xfs_qcnt_t)qtrx->qt_blk_res; - } - if (qtrx->qt_ino_res) { - if (!locked) { - xfs_dqlock(dqp); - locked = B_TRUE; - } - dqp->q_res_icount -= - (xfs_qcnt_t)qtrx->qt_ino_res; - } - - if (qtrx->qt_rtblk_res) { - if (!locked) { - xfs_dqlock(dqp); - locked = B_TRUE; - } - dqp->q_res_rtbcount -= - (xfs_qcnt_t)qtrx->qt_rtblk_res; - } - if (locked) - xfs_dqunlock(dqp); - - } - qa = tp->t_dqinfo->dqa_grpdquots; - } -} - -STATIC void -xfs_quota_warn( - struct xfs_mount *mp, - struct xfs_dquot *dqp, - int type) -{ - /* no warnings for project quotas - we just return ENOSPC later */ - if (dqp->dq_flags & XFS_DQ_PROJ) - return; - quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA, - be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev, - type); -} - -/* - * This reserves disk blocks and inodes against a dquot. - * Flags indicate if the dquot is to be locked here and also - * if the blk reservation is for RT or regular blocks. - * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. - */ -STATIC int -xfs_trans_dqresv( - xfs_trans_t *tp, - xfs_mount_t *mp, - xfs_dquot_t *dqp, - long nblks, - long ninos, - uint flags) -{ - xfs_qcnt_t hardlimit; - xfs_qcnt_t softlimit; - time_t timer; - xfs_qwarncnt_t warns; - xfs_qwarncnt_t warnlimit; - xfs_qcnt_t count; - xfs_qcnt_t *resbcountp; - xfs_quotainfo_t *q = mp->m_quotainfo; - - - xfs_dqlock(dqp); - - if (flags & XFS_TRANS_DQ_RES_BLKS) { - hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); - if (!hardlimit) - hardlimit = q->qi_bhardlimit; - softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit); - if (!softlimit) - softlimit = q->qi_bsoftlimit; - timer = be32_to_cpu(dqp->q_core.d_btimer); - warns = be16_to_cpu(dqp->q_core.d_bwarns); - warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit; - resbcountp = &dqp->q_res_bcount; - } else { - ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); - hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit); - if (!hardlimit) - hardlimit = q->qi_rtbhardlimit; - softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit); - if (!softlimit) - softlimit = q->qi_rtbsoftlimit; - timer = be32_to_cpu(dqp->q_core.d_rtbtimer); - warns = be16_to_cpu(dqp->q_core.d_rtbwarns); - warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit; - resbcountp = &dqp->q_res_rtbcount; - } - - if ((flags & XFS_QMOPT_FORCE_RES) == 0 && - dqp->q_core.d_id && - ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || - (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && - (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { - if (nblks > 0) { - /* - * dquot is locked already. See if we'd go over the - * hardlimit or exceed the timelimit if we allocate - * nblks. 
- */ - if (hardlimit > 0ULL && - hardlimit <= nblks + *resbcountp) { - xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); - goto error_return; - } - if (softlimit > 0ULL && - softlimit <= nblks + *resbcountp) { - if ((timer != 0 && get_seconds() > timer) || - (warns != 0 && warns >= warnlimit)) { - xfs_quota_warn(mp, dqp, - QUOTA_NL_BSOFTLONGWARN); - goto error_return; - } - - xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN); - } - } - if (ninos > 0) { - count = be64_to_cpu(dqp->q_core.d_icount); - timer = be32_to_cpu(dqp->q_core.d_itimer); - warns = be16_to_cpu(dqp->q_core.d_iwarns); - warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; - hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); - if (!hardlimit) - hardlimit = q->qi_ihardlimit; - softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); - if (!softlimit) - softlimit = q->qi_isoftlimit; - - if (hardlimit > 0ULL && count >= hardlimit) { - xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); - goto error_return; - } - if (softlimit > 0ULL && count >= softlimit) { - if ((timer != 0 && get_seconds() > timer) || - (warns != 0 && warns >= warnlimit)) { - xfs_quota_warn(mp, dqp, - QUOTA_NL_ISOFTLONGWARN); - goto error_return; - } - xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN); - } - } - } - - /* - * Change the reservation, but not the actual usage. - * Note that q_res_bcount = q_core.d_bcount + resv - */ - (*resbcountp) += (xfs_qcnt_t)nblks; - if (ninos != 0) - dqp->q_res_icount += (xfs_qcnt_t)ninos; - - /* - * note the reservation amt in the trans struct too, - * so that the transaction knows how much was reserved by - * it against this particular dquot. - * We don't do this when we are reserving for a delayed allocation, - * because we don't have the luxury of a transaction envelope then. - */ - if (tp) { - ASSERT(tp->t_dqinfo); - ASSERT(flags & XFS_QMOPT_RESBLK_MASK); - if (nblks != 0) - xfs_trans_mod_dquot(tp, dqp, - flags & XFS_QMOPT_RESBLK_MASK, - nblks); - if (ninos != 0) - xfs_trans_mod_dquot(tp, dqp, - XFS_TRANS_DQ_RES_INOS, - ninos); - } - ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); - ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); - ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); - - xfs_dqunlock(dqp); - return 0; - -error_return: - xfs_dqunlock(dqp); - if (flags & XFS_QMOPT_ENOSPC) - return ENOSPC; - return EDQUOT; -} - - -/* - * Given dquot(s), make disk block and/or inode reservations against them. - * The fact that this does the reservation against both the usr and - * grp/prj quotas is important, because this follows a both-or-nothing - * approach. - * - * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. - * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota. - * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks - * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks - * dquots are unlocked on return, if they were not locked by caller. 
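The block-reservation check in xfs_trans_dqresv() above reduces to a predicate: a reservation that would reach the hard limit always fails, while one that only reaches the soft limit fails just when the grace timer has expired or the warning budget is exhausted. A stand-alone version of that decision, with simplified illustrative types:

#include <stdio.h>
#include <time.h>

static int may_reserve(unsigned long long resv, long nblks,
                       unsigned long long hard, unsigned long long soft,
                       time_t timer, int warns, int warnlimit)
{
        if (nblks <= 0)
                return 1;
        if (hard && resv + nblks >= hard)
                return 0;                       /* hard limit hit */
        if (soft && resv + nblks >= soft) {
                if ((timer && time(NULL) > timer) ||
                    (warns && warns >= warnlimit))
                        return 0;               /* grace period over */
                /* over the soft limit but inside grace: allowed */
        }
        return 1;
}

int main(void)
{
        printf("%d\n", may_reserve(90, 20, 100, 80, 0, 0, 5));  /* 0 */
        printf("%d\n", may_reserve(70, 20, 100, 80, 0, 0, 5));  /* 1 */
        return 0;
}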
- */ -int -xfs_trans_reserve_quota_bydquots( - xfs_trans_t *tp, - xfs_mount_t *mp, - xfs_dquot_t *udqp, - xfs_dquot_t *gdqp, - long nblks, - long ninos, - uint flags) -{ - int resvd = 0, error; - - if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) - return 0; - - if (tp && tp->t_dqinfo == NULL) - xfs_trans_alloc_dqinfo(tp); - - ASSERT(flags & XFS_QMOPT_RESBLK_MASK); - - if (udqp) { - error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, - (flags & ~XFS_QMOPT_ENOSPC)); - if (error) - return error; - resvd = 1; - } - - if (gdqp) { - error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags); - if (error) { - /* - * can't do it, so backout previous reservation - */ - if (resvd) { - flags |= XFS_QMOPT_FORCE_RES; - xfs_trans_dqresv(tp, mp, udqp, - -nblks, -ninos, flags); - } - return error; - } - } - - /* - * Didn't change anything critical, so, no need to log - */ - return 0; -} - - -/* - * Lock the dquot and change the reservation if we can. - * This doesn't change the actual usage, just the reservation. - * The inode sent in is locked. - */ -int -xfs_trans_reserve_quota_nblks( - struct xfs_trans *tp, - struct xfs_inode *ip, - long nblks, - long ninos, - uint flags) -{ - struct xfs_mount *mp = ip->i_mount; - - if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) - return 0; - if (XFS_IS_PQUOTA_ON(mp)) - flags |= XFS_QMOPT_ENOSPC; - - ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); - ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == - XFS_TRANS_DQ_RES_RTBLKS || - (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == - XFS_TRANS_DQ_RES_BLKS); - - /* - * Reserve nblks against these dquots, with trans as the mediator. - */ - return xfs_trans_reserve_quota_bydquots(tp, mp, - ip->i_udquot, ip->i_gdquot, - nblks, ninos, flags); -} - -/* - * This routine is called to allocate a quotaoff log item. - */ -xfs_qoff_logitem_t * -xfs_trans_get_qoff_item( - xfs_trans_t *tp, - xfs_qoff_logitem_t *startqoff, - uint flags) -{ - xfs_qoff_logitem_t *q; - - ASSERT(tp != NULL); - - q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags); - ASSERT(q != NULL); - - /* - * Get a log_item_desc to point at the new item. - */ - xfs_trans_add_item(tp, &q->qql_item); - return q; -} - - -/* - * This is called to mark the quotaoff logitem as needing - * to be logged when the transaction is committed. The logitem must - * already be associated with the given transaction. - */ -void -xfs_trans_log_quotaoff_item( - xfs_trans_t *tp, - xfs_qoff_logitem_t *qlp) -{ - tp->t_flags |= XFS_TRANS_DIRTY; - qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY; -} - -STATIC void -xfs_trans_alloc_dqinfo( - xfs_trans_t *tp) -{ - tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); -} - -void -xfs_trans_free_dqinfo( - xfs_trans_t *tp) -{ - if (!tp->t_dqinfo) - return; - kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo); - tp->t_dqinfo = NULL; -} diff --git a/fs/xfs/support/uuid.c b/fs/xfs/support/uuid.c deleted file mode 100644 index b83f76b..0000000 --- a/fs/xfs/support/uuid.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. 
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include <xfs.h>
-
-/* IRIX interpretation of an uuid_t */
-typedef struct {
- __be32 uu_timelow;
- __be16 uu_timemid;
- __be16 uu_timehi;
- __be16 uu_clockseq;
- __be16 uu_node[3];
-} xfs_uu_t;
-
-/*
- * uuid_getnodeuniq - obtain the node unique fields of a UUID.
- *
- * This is not in any way a standard or condoned UUID function;
- * it just something that's needed for user-level file handles.
- */
-void
-uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
-{
- xfs_uu_t *uup = (xfs_uu_t *)uuid;
-
- fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
- be16_to_cpu(uup->uu_timemid);
- fsid[1] = be32_to_cpu(uup->uu_timelow);
-}
-
-int
-uuid_is_nil(uuid_t *uuid)
-{
- int i;
- char *cp = (char *)uuid;
-
- if (uuid == NULL)
- return 0;
- /* implied check of version number here... */
- for (i = 0; i < sizeof *uuid; i++)
- if (*cp++) return 0; /* not nil */
- return 1; /* is nil */
-}
-
-int
-uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
-{
- return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
-}
diff --git a/fs/xfs/support/uuid.h b/fs/xfs/support/uuid.h
deleted file mode 100644
index 4732d71..0000000
--- a/fs/xfs/support/uuid.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_SUPPORT_UUID_H__
-#define __XFS_SUPPORT_UUID_H__
-
-typedef struct {
- unsigned char __u_bits[16];
-} uuid_t;
-
-extern int uuid_is_nil(uuid_t *uuid);
-extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
-extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
-
-#endif /* __XFS_SUPPORT_UUID_H__ */
diff --git a/fs/xfs/time.h b/fs/xfs/time.h
new file mode 100644
index 0000000..387e695
--- /dev/null
+++ b/fs/xfs/time.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_SUPPORT_TIME_H__
+#define __XFS_SUPPORT_TIME_H__
+
+#include <linux/sched.h>
+#include <linux/time.h>
+
+typedef struct timespec timespec_t;
+
+static inline void delay(long ticks)
+{
+ schedule_timeout_uninterruptible(ticks);
+}
+
+static inline void nanotime(struct timespec *tvp)
+{
+ *tvp = CURRENT_TIME;
+}
+
+#endif /* __XFS_SUPPORT_TIME_H__ */
diff --git a/fs/xfs/uuid.c b/fs/xfs/uuid.c
new file mode 100644
index 0000000..b83f76b
--- /dev/null
+++ b/fs/xfs/uuid.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <xfs.h>
+
+/* IRIX interpretation of an uuid_t */
+typedef struct {
+ __be32 uu_timelow;
+ __be16 uu_timemid;
+ __be16 uu_timehi;
+ __be16 uu_clockseq;
+ __be16 uu_node[3];
+} xfs_uu_t;
+
+/*
+ * uuid_getnodeuniq - obtain the node unique fields of a UUID.
+ *
+ * This is not in any way a standard or condoned UUID function;
+ * it just something that's needed for user-level file handles.
+ */
+void
+uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
+{
+ xfs_uu_t *uup = (xfs_uu_t *)uuid;
+
+ fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
+ be16_to_cpu(uup->uu_timemid);
+ fsid[1] = be32_to_cpu(uup->uu_timelow);
+}
+
+int
+uuid_is_nil(uuid_t *uuid)
+{
+ int i;
+ char *cp = (char *)uuid;
+
+ if (uuid == NULL)
+ return 0;
+ /* implied check of version number here... */
+ for (i = 0; i < sizeof *uuid; i++)
+ if (*cp++) return 0; /* not nil */
+ return 1; /* is nil */
+}
+
+int
+uuid_equal(uuid_t *uuid1, uuid_t *uuid2)
+{
+ return memcmp(uuid1, uuid2, sizeof(uuid_t)) ? 0 : 1;
+}
diff --git a/fs/xfs/uuid.h b/fs/xfs/uuid.h
new file mode 100644
index 0000000..4732d71
--- /dev/null
+++ b/fs/xfs/uuid.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_SUPPORT_UUID_H__
+#define __XFS_SUPPORT_UUID_H__
+
+typedef struct {
+ unsigned char __u_bits[16];
+} uuid_t;
+
+extern int uuid_is_nil(uuid_t *uuid);
+extern int uuid_equal(uuid_t *uuid1, uuid_t *uuid2);
+extern void uuid_getnodeuniq(uuid_t *uuid, int fsid [2]);
+
+#endif /* __XFS_SUPPORT_UUID_H__ */
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
new file mode 100644
index 0000000..b6c4b37
--- /dev/null
+++ b/fs/xfs/xfs_acl.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2008, Christoph Hellwig
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_acl.h"
+#include "xfs_attr.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl_xattr.h>
+
+
+/*
+ * Locking scheme:
+ * - all ACL updates are protected by inode->i_mutex, which is taken before
+ * calling into this file.
+ */
+
+STATIC struct posix_acl *
+xfs_acl_from_disk(struct xfs_acl *aclp)
+{
+ struct posix_acl_entry *acl_e;
+ struct posix_acl *acl;
+ struct xfs_acl_entry *ace;
+ int count, i;
+
+ count = be32_to_cpu(aclp->acl_cnt);
+
+ acl = posix_acl_alloc(count, GFP_KERNEL);
+ if (!acl)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < count; i++) {
+ acl_e = &acl->a_entries[i];
+ ace = &aclp->acl_entry[i];
+
+ /*
+ * The tag is 32 bits on disk and 16 bits in core.
+ *
+ * Because every access to it goes through the core
+ * format first this is not a problem.
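Every field of the on-disk ACL above is stored big-endian, and the be32_to_cpu()/be16_to_cpu() helpers do the conversion inside the kernel. A portable user-space equivalent of that decode for a big-endian 32-bit tag, built from raw bytes so it works regardless of host endianness:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_decode(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        /* 0x00000002 is ACL_USER in the POSIX ACL tag encoding */
        const uint8_t disk_tag[4] = { 0x00, 0x00, 0x00, 0x02 };

        printf("tag=%u\n", be32_decode(disk_tag));      /* 2 */
        return 0;
}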
+ */ + acl_e->e_tag = be32_to_cpu(ace->ae_tag); + acl_e->e_perm = be16_to_cpu(ace->ae_perm); + + switch (acl_e->e_tag) { + case ACL_USER: + case ACL_GROUP: + acl_e->e_id = be32_to_cpu(ace->ae_id); + break; + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + acl_e->e_id = ACL_UNDEFINED_ID; + break; + default: + goto fail; + } + } + return acl; + +fail: + posix_acl_release(acl); + return ERR_PTR(-EINVAL); +} + +STATIC void +xfs_acl_to_disk(struct xfs_acl *aclp, const struct posix_acl *acl) +{ + const struct posix_acl_entry *acl_e; + struct xfs_acl_entry *ace; + int i; + + aclp->acl_cnt = cpu_to_be32(acl->a_count); + for (i = 0; i < acl->a_count; i++) { + ace = &aclp->acl_entry[i]; + acl_e = &acl->a_entries[i]; + + ace->ae_tag = cpu_to_be32(acl_e->e_tag); + ace->ae_id = cpu_to_be32(acl_e->e_id); + ace->ae_perm = cpu_to_be16(acl_e->e_perm); + } +} + +struct posix_acl * +xfs_get_acl(struct inode *inode, int type) +{ + struct xfs_inode *ip = XFS_I(inode); + struct posix_acl *acl; + struct xfs_acl *xfs_acl; + int len = sizeof(struct xfs_acl); + unsigned char *ea_name; + int error; + + acl = get_cached_acl(inode, type); + if (acl != ACL_NOT_CACHED) + return acl; + + trace_xfs_get_acl(ip); + + switch (type) { + case ACL_TYPE_ACCESS: + ea_name = SGI_ACL_FILE; + break; + case ACL_TYPE_DEFAULT: + ea_name = SGI_ACL_DEFAULT; + break; + default: + BUG(); + } + + /* + * If we have a cached ACLs value just return it, not need to + * go out to the disk. + */ + + xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); + if (!xfs_acl) + return ERR_PTR(-ENOMEM); + + error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, + &len, ATTR_ROOT); + if (error) { + /* + * If the attribute doesn't exist make sure we have a negative + * cache entry, for any other error assume it is transient and + * leave the cache entry as ACL_NOT_CACHED. + */ + if (error == -ENOATTR) { + acl = NULL; + goto out_update_cache; + } + goto out; + } + + acl = xfs_acl_from_disk(xfs_acl); + if (IS_ERR(acl)) + goto out; + + out_update_cache: + set_cached_acl(inode, type, acl); + out: + kfree(xfs_acl); + return acl; +} + +STATIC int +xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) +{ + struct xfs_inode *ip = XFS_I(inode); + unsigned char *ea_name; + int error; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + + switch (type) { + case ACL_TYPE_ACCESS: + ea_name = SGI_ACL_FILE; + break; + case ACL_TYPE_DEFAULT: + if (!S_ISDIR(inode->i_mode)) + return acl ? -EACCES : 0; + ea_name = SGI_ACL_DEFAULT; + break; + default: + return -EINVAL; + } + + if (acl) { + struct xfs_acl *xfs_acl; + int len; + + xfs_acl = kzalloc(sizeof(struct xfs_acl), GFP_KERNEL); + if (!xfs_acl) + return -ENOMEM; + + xfs_acl_to_disk(xfs_acl, acl); + len = sizeof(struct xfs_acl) - + (sizeof(struct xfs_acl_entry) * + (XFS_ACL_MAX_ENTRIES - acl->a_count)); + + error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, + len, ATTR_ROOT); + + kfree(xfs_acl); + } else { + /* + * A NULL ACL argument means we want to remove the ACL. + */ + error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); + + /* + * If the attribute didn't exist to start with that's fine. 
+ */ + if (error == -ENOATTR) + error = 0; + } + + if (!error) + set_cached_acl(inode, type, acl); + return error; +} + +static int +xfs_set_mode(struct inode *inode, umode_t mode) +{ + int error = 0; + + if (mode != inode->i_mode) { + struct iattr iattr; + + iattr.ia_valid = ATTR_MODE | ATTR_CTIME; + iattr.ia_mode = mode; + iattr.ia_ctime = current_fs_time(inode->i_sb); + + error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); + } + + return error; +} + +static int +xfs_acl_exists(struct inode *inode, unsigned char *name) +{ + int len = sizeof(struct xfs_acl); + + return (xfs_attr_get(XFS_I(inode), name, NULL, &len, + ATTR_ROOT|ATTR_KERNOVAL) == 0); +} + +int +posix_acl_access_exists(struct inode *inode) +{ + return xfs_acl_exists(inode, SGI_ACL_FILE); +} + +int +posix_acl_default_exists(struct inode *inode) +{ + if (!S_ISDIR(inode->i_mode)) + return 0; + return xfs_acl_exists(inode, SGI_ACL_DEFAULT); +} + +/* + * No need for i_mutex because the inode is not yet exposed to the VFS. + */ +int +xfs_inherit_acl(struct inode *inode, struct posix_acl *acl) +{ + umode_t mode = inode->i_mode; + int error = 0, inherit = 0; + + if (S_ISDIR(inode->i_mode)) { + error = xfs_set_acl(inode, ACL_TYPE_DEFAULT, acl); + if (error) + goto out; + } + + error = posix_acl_create(&acl, GFP_KERNEL, &mode); + if (error < 0) + return error; + + /* + * If posix_acl_create returns a positive value we need to + * inherit a permission that can't be represented using the Unix + * mode bits and we actually need to set an ACL. + */ + if (error > 0) + inherit = 1; + + error = xfs_set_mode(inode, mode); + if (error) + goto out; + + if (inherit) + error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); + +out: + posix_acl_release(acl); + return error; +} + +int +xfs_acl_chmod(struct inode *inode) +{ + struct posix_acl *acl; + int error; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + + acl = xfs_get_acl(inode, ACL_TYPE_ACCESS); + if (IS_ERR(acl) || !acl) + return PTR_ERR(acl); + + error = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); + if (error) + return error; + + error = xfs_set_acl(inode, ACL_TYPE_ACCESS, acl); + posix_acl_release(acl); + return error; +} + +static int +xfs_xattr_acl_get(struct dentry *dentry, const char *name, + void *value, size_t size, int type) +{ + struct posix_acl *acl; + int error; + + acl = xfs_get_acl(dentry->d_inode, type); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl == NULL) + return -ENODATA; + + error = posix_acl_to_xattr(acl, value, size); + posix_acl_release(acl); + + return error; +} + +static int +xfs_xattr_acl_set(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, int type) +{ + struct inode *inode = dentry->d_inode; + struct posix_acl *acl = NULL; + int error = 0; + + if (flags & XATTR_CREATE) + return -EINVAL; + if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) + return value ? -EACCES : 0; + if ((current_fsuid() != inode->i_uid) && !capable(CAP_FOWNER)) + return -EPERM; + + if (!value) + goto set_acl; + + acl = posix_acl_from_xattr(value, size); + if (!acl) { + /* + * acl_set_file(3) may request that we set default ACLs with + * zero length -- defend (gracefully) against that here. 
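When xfs_set_acl() earlier in this file writes an ACL out, it does not store the maximal structure: it starts from sizeof(struct xfs_acl) and subtracts the unused tail entries, so short ACLs cost little attribute space. The same sizing trick in miniature; the entry layout and the 25-entry maximum are illustrative, not the real xfs_acl definition:

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 25

struct acl_entry { uint32_t tag, id; uint16_t perm; };
struct acl_disk {
        uint32_t cnt;
        struct acl_entry entry[MAX_ENTRIES];
};

static size_t acl_disk_size(int count)
{
        return sizeof(struct acl_disk) -
               sizeof(struct acl_entry) * (MAX_ENTRIES - count);
}

int main(void)
{
        printf("%zu bytes for 3 entries, %zu for the full %d\n",
               acl_disk_size(3), acl_disk_size(MAX_ENTRIES), MAX_ENTRIES);
        return 0;
}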
+ */
+ goto out;
+ }
+ if (IS_ERR(acl)) {
+ error = PTR_ERR(acl);
+ goto out;
+ }
+
+ error = posix_acl_valid(acl);
+ if (error)
+ goto out_release;
+
+ error = -EINVAL;
+ if (acl->a_count > XFS_ACL_MAX_ENTRIES)
+ goto out_release;
+
+ if (type == ACL_TYPE_ACCESS) {
+ umode_t mode = inode->i_mode;
+ error = posix_acl_equiv_mode(acl, &mode);
+
+ if (error <= 0) {
+ posix_acl_release(acl);
+ acl = NULL;
+
+ if (error < 0)
+ return error;
+ }
+
+ error = xfs_set_mode(inode, mode);
+ if (error)
+ goto out_release;
+ }
+
+ set_acl:
+ error = xfs_set_acl(inode, type, acl);
+ out_release:
+ posix_acl_release(acl);
+ out:
+ return error;
+}
+
+const struct xattr_handler xfs_xattr_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .get = xfs_xattr_acl_get,
+ .set = xfs_xattr_acl_set,
+};
+
+const struct xattr_handler xfs_xattr_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .get = xfs_xattr_acl_get,
+ .set = xfs_xattr_acl_set,
+};
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
new file mode 100644
index 0000000..63e971e
--- /dev/null
+++ b/fs/xfs/xfs_aops.c
@@ -0,0 +1,1499 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_trans.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+#include "xfs_iomap.h"
+#include "xfs_vnodeops.h"
+#include "xfs_trace.h"
+#include "xfs_bmap.h"
+#include <linux/gfp.h>
+#include <linux/mpage.h>
+#include <linux/pagevec.h>
+#include <linux/writeback.h>
+
+
+/*
+ * Prime number of hash buckets since address is used as the key.
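The ioend wait-queue table that follows hashes the inode's address into one of a prime number of buckets, accepting the occasional spurious wakeup from a shared bucket in exchange for not embedding a wait queue in every inode. The bucket selection on its own:

#include <stdio.h>

#define NVSYNC 37

static unsigned bucket(const void *ip)
{
        return (unsigned)((unsigned long)ip % NVSYNC);
}

int main(void)
{
        int a, b;

        /* two different "inodes" usually land in different buckets */
        printf("a -> %u, b -> %u\n", bucket(&a), bucket(&b));
        return 0;
}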
+ */ +#define NVSYNC 37 +#define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC]) +static wait_queue_head_t xfs_ioend_wq[NVSYNC]; + +void __init +xfs_ioend_init(void) +{ + int i; + + for (i = 0; i < NVSYNC; i++) + init_waitqueue_head(&xfs_ioend_wq[i]); +} + +void +xfs_ioend_wait( + xfs_inode_t *ip) +{ + wait_queue_head_t *wq = to_ioend_wq(ip); + + wait_event(*wq, (atomic_read(&ip->i_iocount) == 0)); +} + +STATIC void +xfs_ioend_wake( + xfs_inode_t *ip) +{ + if (atomic_dec_and_test(&ip->i_iocount)) + wake_up(to_ioend_wq(ip)); +} + +void +xfs_count_page_state( + struct page *page, + int *delalloc, + int *unwritten) +{ + struct buffer_head *bh, *head; + + *delalloc = *unwritten = 0; + + bh = head = page_buffers(page); + do { + if (buffer_unwritten(bh)) + (*unwritten) = 1; + else if (buffer_delay(bh)) + (*delalloc) = 1; + } while ((bh = bh->b_this_page) != head); +} + +STATIC struct block_device * +xfs_find_bdev_for_inode( + struct inode *inode) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + + if (XFS_IS_REALTIME_INODE(ip)) + return mp->m_rtdev_targp->bt_bdev; + else + return mp->m_ddev_targp->bt_bdev; +} + +/* + * We're now finished for good with this ioend structure. + * Update the page state via the associated buffer_heads, + * release holds on the inode and bio, and finally free + * up memory. Do not use the ioend after this. + */ +STATIC void +xfs_destroy_ioend( + xfs_ioend_t *ioend) +{ + struct buffer_head *bh, *next; + struct xfs_inode *ip = XFS_I(ioend->io_inode); + + for (bh = ioend->io_buffer_head; bh; bh = next) { + next = bh->b_private; + bh->b_end_io(bh, !ioend->io_error); + } + + /* + * Volume managers supporting multiple paths can send back ENODEV + * when the final path disappears. In this case continuing to fill + * the page cache with dirty data which cannot be written out is + * evil, so prevent that. + */ + if (unlikely(ioend->io_error == -ENODEV)) { + xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, + __FILE__, __LINE__); + } + + xfs_ioend_wake(ip); + mempool_free(ioend, xfs_ioend_pool); +} + +/* + * If the end of the current ioend is beyond the current EOF, + * return the new EOF value, otherwise zero. + */ +STATIC xfs_fsize_t +xfs_ioend_new_eof( + xfs_ioend_t *ioend) +{ + xfs_inode_t *ip = XFS_I(ioend->io_inode); + xfs_fsize_t isize; + xfs_fsize_t bsize; + + bsize = ioend->io_offset + ioend->io_size; + isize = MAX(ip->i_size, ip->i_new_size); + isize = MIN(isize, bsize); + return isize > ip->i_d.di_size ? isize : 0; +} + +/* + * Update on-disk file size now that data has been written to disk. The + * current in-memory file size is i_size. If a write is beyond eof i_new_size + * will be the intended file size until i_size is updated. If this write does + * not extend all the way to the valid file size then restrict this update to + * the end of the write. + * + * This function does not block as blocking on the inode lock in IO completion + * can lead to IO completion order dependency deadlocks.. If it can't get the + * inode ilock it will return EAGAIN. Callers must handle this. 
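xfs_ioend_new_eof() above clamps the candidate EOF to the end of the just-completed I/O and reports it only when it would actually move the on-disk size forward. The same arithmetic as a stand-alone function; the type and field names are simplified stand-ins:

#include <stdio.h>

typedef long long fsize_t;

static fsize_t new_eof(fsize_t i_size, fsize_t i_new_size,
                       fsize_t io_offset, fsize_t io_size, fsize_t di_size)
{
        fsize_t isize = i_size > i_new_size ? i_size : i_new_size;
        fsize_t bsize = io_offset + io_size;

        if (isize > bsize)
                isize = bsize;
        return isize > di_size ? isize : 0;
}

int main(void)
{
        /* extending write: 4096..8192 completes, on-disk size is 4096 */
        printf("%lld\n", new_eof(8192, 8192, 4096, 4096, 4096)); /* 8192 */
        /* I/O entirely inside the on-disk size: nothing to update */
        printf("%lld\n", new_eof(8192, 8192, 0, 2048, 8192));    /* 0 */
        return 0;
}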
+ */ +STATIC int +xfs_setfilesize( + xfs_ioend_t *ioend) +{ + xfs_inode_t *ip = XFS_I(ioend->io_inode); + xfs_fsize_t isize; + + if (unlikely(ioend->io_error)) + return 0; + + if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) + return EAGAIN; + + isize = xfs_ioend_new_eof(ioend); + if (isize) { + trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); + ip->i_d.di_size = isize; + xfs_mark_inode_dirty(ip); + } + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return 0; +} + +/* + * Schedule IO completion handling on the final put of an ioend. + */ +STATIC void +xfs_finish_ioend( + struct xfs_ioend *ioend) +{ + if (atomic_dec_and_test(&ioend->io_remaining)) { + if (ioend->io_type == IO_UNWRITTEN) + queue_work(xfsconvertd_workqueue, &ioend->io_work); + else + queue_work(xfsdatad_workqueue, &ioend->io_work); + } +} + +/* + * IO write completion. + */ +STATIC void +xfs_end_io( + struct work_struct *work) +{ + xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work); + struct xfs_inode *ip = XFS_I(ioend->io_inode); + int error = 0; + + /* + * For unwritten extents we need to issue transactions to convert a + * range to normal written extens after the data I/O has finished. + */ + if (ioend->io_type == IO_UNWRITTEN && + likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) { + + error = xfs_iomap_write_unwritten(ip, ioend->io_offset, + ioend->io_size); + if (error) + ioend->io_error = error; + } + + /* + * We might have to update the on-disk file size after extending + * writes. + */ + error = xfs_setfilesize(ioend); + ASSERT(!error || error == EAGAIN); + + /* + * If we didn't complete processing of the ioend, requeue it to the + * tail of the workqueue for another attempt later. Otherwise destroy + * it. + */ + if (error == EAGAIN) { + atomic_inc(&ioend->io_remaining); + xfs_finish_ioend(ioend); + /* ensure we don't spin on blocked ioends */ + delay(1); + } else { + if (ioend->io_iocb) + aio_complete(ioend->io_iocb, ioend->io_result, 0); + xfs_destroy_ioend(ioend); + } +} + +/* + * Call IO completion handling in caller context on the final put of an ioend. + */ +STATIC void +xfs_finish_ioend_sync( + struct xfs_ioend *ioend) +{ + if (atomic_dec_and_test(&ioend->io_remaining)) + xfs_end_io(&ioend->io_work); +} + +/* + * Allocate and initialise an IO completion structure. + * We need to track unwritten extent write completion here initially. + * We'll need to extend this for updating the ondisk inode size later + * (vs. incore size). + */ +STATIC xfs_ioend_t * +xfs_alloc_ioend( + struct inode *inode, + unsigned int type) +{ + xfs_ioend_t *ioend; + + ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS); + + /* + * Set the count to 1 initially, which will prevent an I/O + * completion callback from happening before we have started + * all the I/O from calling the completion routine too early. 
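That initial count of 1 is the classic "sticky submitter reference": each bio takes its own reference, and the completion handler can only fire once the submitter drops the last one, so it cannot run while bios are still being attached. The bare pattern in C11 atomics; everything here is an illustrative model, not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int remaining = 1;        /* the submitter's reference */

static void complete(void) { puts("all I/O done"); }

static void put(void)
{
        if (atomic_fetch_sub(&remaining, 1) == 1)
                complete();
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                atomic_fetch_add(&remaining, 1);        /* one per bio */
        for (i = 0; i < 3; i++)
                put();          /* bios complete; count stays above 0 */
        put();  /* submitter's put releases the last reference */
        return 0;
}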
+ */ + atomic_set(&ioend->io_remaining, 1); + ioend->io_error = 0; + ioend->io_list = NULL; + ioend->io_type = type; + ioend->io_inode = inode; + ioend->io_buffer_head = NULL; + ioend->io_buffer_tail = NULL; + atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); + ioend->io_offset = 0; + ioend->io_size = 0; + ioend->io_iocb = NULL; + ioend->io_result = 0; + + INIT_WORK(&ioend->io_work, xfs_end_io); + return ioend; +} + +STATIC int +xfs_map_blocks( + struct inode *inode, + loff_t offset, + struct xfs_bmbt_irec *imap, + int type, + int nonblocking) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + ssize_t count = 1 << inode->i_blkbits; + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int bmapi_flags = XFS_BMAPI_ENTIRE; + int nimaps = 1; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + if (type == IO_UNWRITTEN) + bmapi_flags |= XFS_BMAPI_IGSTATE; + + if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + if (nonblocking) + return -XFS_ERROR(EAGAIN); + xfs_ilock(ip, XFS_ILOCK_SHARED); + } + + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + (ip->i_df.if_flags & XFS_IFEXTENTS)); + ASSERT(offset <= mp->m_maxioffset); + + if (offset + count > mp->m_maxioffset) + count = mp->m_maxioffset - offset; + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); + offset_fsb = XFS_B_TO_FSBT(mp, offset); + error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, + bmapi_flags, NULL, 0, imap, &nimaps, NULL); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (error) + return -XFS_ERROR(error); + + if (type == IO_DELALLOC && + (!nimaps || isnullstartblock(imap->br_startblock))) { + error = xfs_iomap_write_allocate(ip, offset, count, imap); + if (!error) + trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); + return -XFS_ERROR(error); + } + +#ifdef DEBUG + if (type == IO_UNWRITTEN) { + ASSERT(nimaps); + ASSERT(imap->br_startblock != HOLESTARTBLOCK); + ASSERT(imap->br_startblock != DELAYSTARTBLOCK); + } +#endif + if (nimaps) + trace_xfs_map_blocks_found(ip, offset, count, type, imap); + return 0; +} + +STATIC int +xfs_imap_valid( + struct inode *inode, + struct xfs_bmbt_irec *imap, + xfs_off_t offset) +{ + offset >>= inode->i_blkbits; + + return offset >= imap->br_startoff && + offset < imap->br_startoff + imap->br_blockcount; +} + +/* + * BIO completion handler for buffered IO. + */ +STATIC void +xfs_end_bio( + struct bio *bio, + int error) +{ + xfs_ioend_t *ioend = bio->bi_private; + + ASSERT(atomic_read(&bio->bi_cnt) >= 1); + ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; + + /* Toss bio and pass work off to an xfsdatad thread */ + bio->bi_private = NULL; + bio->bi_end_io = NULL; + bio_put(bio); + + xfs_finish_ioend(ioend); +} + +STATIC void +xfs_submit_ioend_bio( + struct writeback_control *wbc, + xfs_ioend_t *ioend, + struct bio *bio) +{ + atomic_inc(&ioend->io_remaining); + bio->bi_private = ioend; + bio->bi_end_io = xfs_end_bio; + + /* + * If the I/O is beyond EOF we mark the inode dirty immediately + * but don't update the inode size until I/O completion. + */ + if (xfs_ioend_new_eof(ioend)) + xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); + + submit_bio(wbc->sync_mode == WB_SYNC_ALL ? 
WRITE_SYNC : WRITE, bio);
+}
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+ struct buffer_head *bh)
+{
+ int nvecs = bio_get_nr_vecs(bh->b_bdev);
+ struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
+
+ ASSERT(bio->bi_private == NULL);
+ bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_bdev = bh->b_bdev;
+ return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+ struct buffer_head *bh)
+{
+ ASSERT(buffer_mapped(bh));
+ ASSERT(buffer_locked(bh));
+ ASSERT(!buffer_delay(bh));
+ ASSERT(!buffer_unwritten(bh));
+
+ mark_buffer_async_write(bh);
+ set_buffer_uptodate(bh);
+ clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
+ struct page *page,
+ int clear_dirty,
+ int buffers)
+{
+ ASSERT(PageLocked(page));
+ ASSERT(!PageWriteback(page));
+ if (clear_dirty)
+ clear_page_dirty_for_io(page);
+ set_page_writeback(page);
+ unlock_page(page);
+ /* If no buffers on the page are to be written, finish it here */
+ if (!buffers)
+ end_page_writeback(page);
+}
+
+static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+ return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up, covering the
+ * initial writepage page and also any probed pages.
+ *
+ * Because we may have multiple ioends spanning a page, we need to start
+ * writeback on all the buffers before we submit them for I/O. If we mark the
+ * buffers as we go, I/O completion can occur on a page that only has some of
+ * its buffers marked async write, before we have marked the remaining
+ * buffers async write.
+ *
+ * The end result of this is that we trip a bug in end_page_writeback() because
+ * we call it twice for the one page as the code in end_buffer_async_write()
+ * assumes that all buffers on the page are started at the same time.
+ *
+ * The fix is two passes across the ioend list - one to start writeback on the
+ * buffer_heads, and then submit them for I/O on the second pass.
+ */
+STATIC void
+xfs_submit_ioend(
+ struct writeback_control *wbc,
+ xfs_ioend_t *ioend)
+{
+ xfs_ioend_t *head = ioend;
+ xfs_ioend_t *next;
+ struct buffer_head *bh;
+ struct bio *bio;
+ sector_t lastblock = 0;
+
+ /* Pass 1 - start writeback */
+ do {
+ next = ioend->io_list;
+ for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
+ xfs_start_buffer_writeback(bh);
+ } while ((ioend = next) != NULL);
+
+ /* Pass 2 - submit I/O */
+ ioend = head;
+ do {
+ next = ioend->io_list;
+ bio = NULL;
+
+ for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+
+ if (!bio) {
+ retry:
+ bio = xfs_alloc_ioend_bio(bh);
+ } else if (bh->b_blocknr != lastblock + 1) {
+ xfs_submit_ioend_bio(wbc, ioend, bio);
+ goto retry;
+ }
+
+ if (bio_add_buffer(bio, bh) != bh->b_size) {
+ xfs_submit_ioend_bio(wbc, ioend, bio);
+ goto retry;
+ }
+
+ lastblock = bh->b_blocknr;
+ }
+ if (bio)
+ xfs_submit_ioend_bio(wbc, ioend, bio);
+ xfs_finish_ioend(ioend);
+ } while ((ioend = next) != NULL);
+}
+
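The two-pass structure of xfs_submit_ioend() above (mark every buffer for writeback first, only then submit the bios) is a general defence against completion racing ahead of setup. A minimal user-space sketch of the same control flow, with hypothetical names and a plain linked list standing in for the ioend chain:

#include <stdio.h>

struct item {
	struct item *next;
	int started;
};

/* Pass 1: mark every item up front, before any work is submitted. */
static void start_all(struct item *head)
{
	struct item *i;

	for (i = head; i; i = i->next)
		i->started = 1;
}

/* Pass 2: only now kick the work off. A completion arriving here
 * can never observe a half-marked list. */
static void submit_all(struct item *head)
{
	struct item *i;

	for (i = head; i; i = i->next)
		printf("submitting item %p (started=%d)\n",
		       (void *)i, i->started);
}

int main(void)
{
	struct item c = { NULL, 0 }, b = { &c, 0 }, a = { &b, 0 };

	start_all(&a);
	submit_all(&a);
	return 0;
}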
+/*
+ * Cancel submission of all buffer_heads so far in this endio.
+ * Toss the endio too. Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+ xfs_ioend_t *ioend)
+{
+ xfs_ioend_t *next;
+ struct buffer_head *bh, *next_bh;
+
+ do {
+ next = ioend->io_list;
+ bh = ioend->io_buffer_head;
+ do {
+ next_bh = bh->b_private;
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+ } while ((bh = next_bh) != NULL);
+
+ xfs_ioend_wake(XFS_I(ioend->io_inode));
+ mempool_free(ioend, xfs_ioend_pool);
+ } while ((ioend = next) != NULL);
+}
+
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we finish off any current ioend and start another.
+ * The resulting ioend is returned via the result pointer.
+ */
+STATIC void
+xfs_add_to_ioend(
+ struct inode *inode,
+ struct buffer_head *bh,
+ xfs_off_t offset,
+ unsigned int type,
+ xfs_ioend_t **result,
+ int need_ioend)
+{
+ xfs_ioend_t *ioend = *result;
+
+ if (!ioend || need_ioend || type != ioend->io_type) {
+ xfs_ioend_t *previous = *result;
+
+ ioend = xfs_alloc_ioend(inode, type);
+ ioend->io_offset = offset;
+ ioend->io_buffer_head = bh;
+ ioend->io_buffer_tail = bh;
+ if (previous)
+ previous->io_list = ioend;
+ *result = ioend;
+ } else {
+ ioend->io_buffer_tail->b_private = bh;
+ ioend->io_buffer_tail = bh;
+ }
+
+ bh->b_private = NULL;
+ ioend->io_size += bh->b_size;
+}
+
+STATIC void
+xfs_map_buffer(
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
+{
+ sector_t bn;
+ struct xfs_mount *m = XFS_I(inode)->i_mount;
+ xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
+ xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
+
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+
+ bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
+ ((offset - iomap_offset) >> inode->i_blkbits);
+
+ ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
+
+ bh->b_blocknr = bn;
+ set_buffer_mapped(bh);
+}
+
+STATIC void
+xfs_map_at_offset(
+ struct inode *inode,
+ struct buffer_head *bh,
+ struct xfs_bmbt_irec *imap,
+ xfs_off_t offset)
+{
+ ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+ ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+
+ xfs_map_buffer(inode, bh, imap, offset);
+ set_buffer_mapped(bh);
+ clear_buffer_delay(bh);
+ clear_buffer_unwritten(bh);
+}
+
+/*
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
+ */
+STATIC int
+xfs_is_delayed_page(
+ struct page *page,
+ unsigned int type)
+{
+ if (PageWriteback(page))
+ return 0;
+
+ if (page->mapping && page_has_buffers(page)) {
+ struct buffer_head *bh, *head;
+ int acceptable = 0;
+
+ bh = head = page_buffers(page);
+ do {
+ if (buffer_unwritten(bh))
+ acceptable = (type == IO_UNWRITTEN);
+ else if (buffer_delay(bh))
+ acceptable = (type == IO_DELALLOC);
+ else if (buffer_dirty(bh) && buffer_mapped(bh))
+ acceptable = (type == IO_OVERWRITE);
+ else
+ break;
+ } while ((bh = bh->b_this_page) != head);
+
+ if (acceptable)
+ return 1;
+ }
+
+ return 0;
+}
+
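xfs_add_to_ioend() above is an instance of a common batching idiom: keep appending to the current batch while the new item is compatible, otherwise close it off and chain a fresh one. A rough stand-alone C sketch of that idiom (hypothetical names; small ints stand in for the buffer types):

#include <stdio.h>
#include <stdlib.h>

struct batch {
	struct batch *next;	/* chained like ioend->io_list */
	int type;
	int count;
};

/* Append to the current batch while the type matches; otherwise
 * open a new batch and chain it behind the previous one, the same
 * decision xfs_add_to_ioend() makes per buffer. */
static void add_to_batch(struct batch **cur, int type)
{
	if (!*cur || (*cur)->type != type) {
		struct batch *b = calloc(1, sizeof(*b));

		b->type = type;
		if (*cur)
			(*cur)->next = b;
		*cur = b;
	}
	(*cur)->count++;
}

int main(void)
{
	static const int types[] = { 1, 1, 2, 2, 2, 1 };
	struct batch *head = NULL, *cur = NULL;
	unsigned int i;

	for (i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
		add_to_batch(&cur, types[i]);
		if (!head)
			head = cur;
	}
	for (; head; head = head->next)
		printf("batch: type %d, %d buffers\n", head->type, head->count);
	return 0;	/* batches deliberately leaked in this sketch */
}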
+/*
+ * Allocate & map buffers for page given the extent map. Write it out.
+ * Except for the original page of a writepage, this is called on
+ * delalloc/unwritten pages only; for the original page it is possible
+ * that the page has no mapping at all.
+ */
+STATIC int
+xfs_convert_page(
+ struct inode *inode,
+ struct page *page,
+ loff_t tindex,
+ struct xfs_bmbt_irec *imap,
+ xfs_ioend_t **ioendp,
+ struct writeback_control *wbc)
+{
+ struct buffer_head *bh, *head;
+ xfs_off_t end_offset;
+ unsigned long p_offset;
+ unsigned int type;
+ int len, page_dirty;
+ int count = 0, done = 0, uptodate = 1;
+ xfs_off_t offset = page_offset(page);
+
+ if (page->index != tindex)
+ goto fail;
+ if (!trylock_page(page))
+ goto fail;
+ if (PageWriteback(page))
+ goto fail_unlock_page;
+ if (page->mapping != inode->i_mapping)
+ goto fail_unlock_page;
+ if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+ goto fail_unlock_page;
+
+ /*
+ * page_dirty is initially a count of buffers on the page before
+ * EOF and is decremented as we move each into a cleanable state.
+ *
+ * Derivation:
+ *
+ * End offset is the highest offset that this page should represent.
+ * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+ * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+ * hence give us the correct page_dirty count. On any other page,
+ * it will be zero and in that case we need page_dirty to be the
+ * count of buffers on the page.
+ */
+ end_offset = min_t(unsigned long long,
+ (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+ i_size_read(inode));
+
+ len = 1 << inode->i_blkbits;
+ p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+ PAGE_CACHE_SIZE);
+ p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+ page_dirty = p_offset / len;
+
+ bh = head = page_buffers(page);
+ do {
+ if (offset >= end_offset)
+ break;
+ if (!buffer_uptodate(bh))
+ uptodate = 0;
+ if (!(PageUptodate(page) || buffer_uptodate(bh))) {
+ done = 1;
+ continue;
+ }
+
+ if (buffer_unwritten(bh) || buffer_delay(bh) ||
+ buffer_mapped(bh)) {
+ if (buffer_unwritten(bh))
+ type = IO_UNWRITTEN;
+ else if (buffer_delay(bh))
+ type = IO_DELALLOC;
+ else
+ type = IO_OVERWRITE;
+
+ if (!xfs_imap_valid(inode, imap, offset)) {
+ done = 1;
+ continue;
+ }
+
+ lock_buffer(bh);
+ if (type != IO_OVERWRITE)
+ xfs_map_at_offset(inode, bh, imap, offset);
+ xfs_add_to_ioend(inode, bh, offset, type,
+ ioendp, done);
+
+ page_dirty--;
+ count++;
+ } else {
+ done = 1;
+ }
+ } while (offset += len, (bh = bh->b_this_page) != head);
+
+ if (uptodate && bh == head)
+ SetPageUptodate(page);
+
+ if (count) {
+ if (--wbc->nr_to_write <= 0 &&
+ wbc->sync_mode == WB_SYNC_NONE)
+ done = 1;
+ }
+ xfs_start_page_writeback(page, !page_dirty, count);
+
+ return done;
+ fail_unlock_page:
+ unlock_page(page);
+ fail:
+ return 1;
+}
+
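The page_dirty derivation documented inside xfs_convert_page() above boils down to a little arithmetic on the end offset. A stand-alone restatement, assuming 4k pages; the name and sample values are invented for the example:

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL

/* Restates the page_dirty derivation in xfs_convert_page(): count
 * the block-sized buffers on a page that sit below EOF. A zero
 * offset into the page means the page is entirely below EOF. */
static unsigned long page_dirty_count(unsigned long long end_offset,
				      unsigned long blocksize)
{
	unsigned long p_offset = end_offset & (PAGE_CACHE_SIZE - 1);

	if (p_offset == 0)
		p_offset = PAGE_CACHE_SIZE;
	else
		p_offset = ((p_offset + blocksize - 1) / blocksize) * blocksize;
	return p_offset / blocksize;
}

int main(void)
{
	/* 1k blocks, EOF 1500 bytes into the page: 2 buffers below EOF */
	printf("%lu dirty buffers\n", page_dirty_count(4096 + 1500, 1024));
	return 0;
}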
+/*
+ * Convert & write out a cluster of pages in the same extent as defined
+ * by imap and following the start page.
+ */
+STATIC void
+xfs_cluster_write(
+ struct inode *inode,
+ pgoff_t tindex,
+ struct xfs_bmbt_irec *imap,
+ xfs_ioend_t **ioendp,
+ struct writeback_control *wbc,
+ pgoff_t tlast)
+{
+ struct pagevec pvec;
+ int done = 0, i;
+
+ pagevec_init(&pvec, 0);
+ while (!done && tindex <= tlast) {
+ unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+ if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+ break;
+
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+ imap, ioendp, wbc);
+ if (done)
+ break;
+ }
+
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+}
+
+STATIC void
+xfs_vm_invalidatepage(
+ struct page *page,
+ unsigned long offset)
+{
+ trace_xfs_invalidatepage(page->mapping->host, page, offset);
+ block_invalidatepage(page, offset);
+}
+
+/*
+ * If the page has delalloc buffers on it, we need to punch them out before we
+ * invalidate the page. If we don't, we leave a stale delalloc mapping on the
+ * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
+ * is done on that same region - the delalloc extent is returned when none is
+ * supposed to be there.
+ *
+ * We prevent this by truncating away the delalloc regions on the page before
+ * invalidating it. Because they are delalloc, we can do this without needing a
+ * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
+ * truncation without a transaction as there is no space left for block
+ * reservation (typically why we see an ENOSPC in writeback).
+ *
+ * This is not a performance critical path, so for now just do the punching a
+ * buffer head at a time.
+ */
+STATIC void
+xfs_aops_discard_page(
+ struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct buffer_head *bh, *head;
+ loff_t offset = page_offset(page);
+
+ if (!xfs_is_delayed_page(page, IO_DELALLOC))
+ goto out_invalidate;
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ goto out_invalidate;
+
+ xfs_alert(ip->i_mount,
+ "page discard on page %p, inode 0x%llx, offset %llu.",
+ page, ip->i_ino, offset);
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ bh = head = page_buffers(page);
+ do {
+ int error;
+ xfs_fileoff_t start_fsb;
+
+ if (!buffer_delay(bh))
+ goto next_buffer;
+
+ start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
+ if (error) {
+ /* something screwed, just bail */
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_alert(ip->i_mount,
+ "page discard unable to remove delalloc mapping.");
+ }
+ break;
+ }
+next_buffer:
+ offset += 1 << inode->i_blkbits;
+
+ } while ((bh = bh->b_this_page) != head);
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out_invalidate:
+ xfs_vm_invalidatepage(page, 0);
+ return;
+}
+
+/*
+ * Write out a dirty page.
+ *
+ * For delalloc space on the page we need to allocate space and flush it.
+ * For unwritten space on the page we need to start the conversion to
+ * regular allocated space.
+ * For any other dirty buffer heads on the page we should flush them.
+ */ +STATIC int +xfs_vm_writepage( + struct page *page, + struct writeback_control *wbc) +{ + struct inode *inode = page->mapping->host; + struct buffer_head *bh, *head; + struct xfs_bmbt_irec imap; + xfs_ioend_t *ioend = NULL, *iohead = NULL; + loff_t offset; + unsigned int type; + __uint64_t end_offset; + pgoff_t end_index, last_index; + ssize_t len; + int err, imap_valid = 0, uptodate = 1; + int count = 0; + int nonblocking = 0; + + trace_xfs_writepage(inode, page, 0); + + ASSERT(page_has_buffers(page)); + + /* + * Refuse to write the page out if we are called from reclaim context. + * + * This avoids stack overflows when called from deeply used stacks in + * random callers for direct reclaim or memcg reclaim. We explicitly + * allow reclaim from kswapd as the stack usage there is relatively low. + * + * This should really be done by the core VM, but until that happens + * filesystems like XFS, btrfs and ext4 have to take care of this + * by themselves. + */ + if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) + goto redirty; + + /* + * Given that we do not allow direct reclaim to call us, we should + * never be called while in a filesystem transaction. + */ + if (WARN_ON(current->flags & PF_FSTRANS)) + goto redirty; + + /* Is this page beyond the end of the file? */ + offset = i_size_read(inode); + end_index = offset >> PAGE_CACHE_SHIFT; + last_index = (offset - 1) >> PAGE_CACHE_SHIFT; + if (page->index >= end_index) { + if ((page->index >= end_index + 1) || + !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { + unlock_page(page); + return 0; + } + } + + end_offset = min_t(unsigned long long, + (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, + offset); + len = 1 << inode->i_blkbits; + + bh = head = page_buffers(page); + offset = page_offset(page); + type = IO_OVERWRITE; + + if (wbc->sync_mode == WB_SYNC_NONE) + nonblocking = 1; + + do { + int new_ioend = 0; + + if (offset >= end_offset) + break; + if (!buffer_uptodate(bh)) + uptodate = 0; + + /* + * set_page_dirty dirties all buffers in a page, independent + * of their state. The dirty state however is entirely + * meaningless for holes (!mapped && uptodate), so skip + * buffers covering holes here. + */ + if (!buffer_mapped(bh) && buffer_uptodate(bh)) { + imap_valid = 0; + continue; + } + + if (buffer_unwritten(bh)) { + if (type != IO_UNWRITTEN) { + type = IO_UNWRITTEN; + imap_valid = 0; + } + } else if (buffer_delay(bh)) { + if (type != IO_DELALLOC) { + type = IO_DELALLOC; + imap_valid = 0; + } + } else if (buffer_uptodate(bh)) { + if (type != IO_OVERWRITE) { + type = IO_OVERWRITE; + imap_valid = 0; + } + } else { + if (PageUptodate(page)) { + ASSERT(buffer_mapped(bh)); + imap_valid = 0; + } + continue; + } + + if (imap_valid) + imap_valid = xfs_imap_valid(inode, &imap, offset); + if (!imap_valid) { + /* + * If we didn't have a valid mapping then we need to + * put the new mapping into a separate ioend structure. + * This ensures non-contiguous extents always have + * separate ioends, which is particularly important + * for unwritten extent conversion at I/O completion + * time. 
+ */ + new_ioend = 1; + err = xfs_map_blocks(inode, offset, &imap, type, + nonblocking); + if (err) + goto error; + imap_valid = xfs_imap_valid(inode, &imap, offset); + } + if (imap_valid) { + lock_buffer(bh); + if (type != IO_OVERWRITE) + xfs_map_at_offset(inode, bh, &imap, offset); + xfs_add_to_ioend(inode, bh, offset, type, &ioend, + new_ioend); + count++; + } + + if (!iohead) + iohead = ioend; + + } while (offset += len, ((bh = bh->b_this_page) != head)); + + if (uptodate && bh == head) + SetPageUptodate(page); + + xfs_start_page_writeback(page, 1, count); + + if (ioend && imap_valid) { + xfs_off_t end_index; + + end_index = imap.br_startoff + imap.br_blockcount; + + /* to bytes */ + end_index <<= inode->i_blkbits; + + /* to pages */ + end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; + + /* check against file size */ + if (end_index > last_index) + end_index = last_index; + + xfs_cluster_write(inode, page->index + 1, &imap, &ioend, + wbc, end_index); + } + + if (iohead) + xfs_submit_ioend(wbc, iohead); + + return 0; + +error: + if (iohead) + xfs_cancel_ioend(iohead); + + if (err == -EAGAIN) + goto redirty; + + xfs_aops_discard_page(page); + ClearPageUptodate(page); + unlock_page(page); + return err; + +redirty: + redirty_page_for_writepage(wbc, page); + unlock_page(page); + return 0; +} + +STATIC int +xfs_vm_writepages( + struct address_space *mapping, + struct writeback_control *wbc) +{ + xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); + return generic_writepages(mapping, wbc); +} + +/* + * Called to move a page into cleanable state - and from there + * to be released. The page should already be clean. We always + * have buffer heads in this call. + * + * Returns 1 if the page is ok to release, 0 otherwise. + */ +STATIC int +xfs_vm_releasepage( + struct page *page, + gfp_t gfp_mask) +{ + int delalloc, unwritten; + + trace_xfs_releasepage(page->mapping->host, page, 0); + + xfs_count_page_state(page, &delalloc, &unwritten); + + if (WARN_ON(delalloc)) + return 0; + if (WARN_ON(unwritten)) + return 0; + + return try_to_free_buffers(page); +} + +STATIC int +__xfs_get_blocks( + struct inode *inode, + sector_t iblock, + struct buffer_head *bh_result, + int create, + int direct) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int lockmode = 0; + struct xfs_bmbt_irec imap; + int nimaps = 1; + xfs_off_t offset; + ssize_t size; + int new = 0; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + offset = (xfs_off_t)iblock << inode->i_blkbits; + ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); + size = bh_result->b_size; + + if (!create && direct && offset >= i_size_read(inode)) + return 0; + + if (create) { + lockmode = XFS_ILOCK_EXCL; + xfs_ilock(ip, lockmode); + } else { + lockmode = xfs_ilock_map_shared(ip); + } + + ASSERT(offset <= mp->m_maxioffset); + if (offset + size > mp->m_maxioffset) + size = mp->m_maxioffset - offset; + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); + offset_fsb = XFS_B_TO_FSBT(mp, offset); + + error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, + XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL); + if (error) + goto out_unlock; + + if (create && + (!nimaps || + (imap.br_startblock == HOLESTARTBLOCK || + imap.br_startblock == DELAYSTARTBLOCK))) { + if (direct) { + error = xfs_iomap_write_direct(ip, offset, size, + &imap, nimaps); + } else { + error = xfs_iomap_write_delay(ip, offset, size, &imap); + } + if (error) + goto out_unlock; + + 
trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+ } else if (nimaps) {
+ trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+ } else {
+ trace_xfs_get_blocks_notfound(ip, offset, size);
+ goto out_unlock;
+ }
+ xfs_iunlock(ip, lockmode);
+
+ if (imap.br_startblock != HOLESTARTBLOCK &&
+ imap.br_startblock != DELAYSTARTBLOCK) {
+ /*
+ * For unwritten extents do not report a disk address on
+ * the read case (treat as if we're reading into a hole).
+ */
+ if (create || !ISUNWRITTEN(&imap))
+ xfs_map_buffer(inode, bh_result, &imap, offset);
+ if (create && ISUNWRITTEN(&imap)) {
+ if (direct)
+ bh_result->b_private = inode;
+ set_buffer_unwritten(bh_result);
+ }
+ }
+
+ /*
+ * If this is a realtime file, data may be on a different device
+ * to that pointed to from the buffer_head b_bdev currently.
+ */
+ bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
+
+ /*
+ * If we previously allocated a block out beyond eof and we are now
+ * coming back to use it then we will need to flag it as new even if it
+ * has a disk address.
+ *
+ * With sub-block writes into unwritten extents we also need to mark
+ * the buffer as new so that the unwritten parts of the buffer get
+ * correctly zeroed.
+ */
+ if (create &&
+ ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
+ (offset >= i_size_read(inode)) ||
+ (new || ISUNWRITTEN(&imap))))
+ set_buffer_new(bh_result);
+
+ if (imap.br_startblock == DELAYSTARTBLOCK) {
+ BUG_ON(direct);
+ if (create) {
+ set_buffer_uptodate(bh_result);
+ set_buffer_mapped(bh_result);
+ set_buffer_delay(bh_result);
+ }
+ }
+
+ /*
+ * If this is O_DIRECT or the mpage code calling, tell them how large
+ * the mapping is, so that we can avoid repeated get_blocks calls.
+ */
+ if (direct || size > (1 << inode->i_blkbits)) {
+ xfs_off_t mapping_size;
+
+ mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
+ mapping_size <<= inode->i_blkbits;
+
+ ASSERT(mapping_size > 0);
+ if (mapping_size > size)
+ mapping_size = size;
+ if (mapping_size > LONG_MAX)
+ mapping_size = LONG_MAX;
+
+ bh_result->b_size = mapping_size;
+ }
+
+ return 0;
+
+out_unlock:
+ xfs_iunlock(ip, lockmode);
+ return -error;
+}
+
+int
+xfs_get_blocks(
+ struct inode *inode,
+ sector_t iblock,
+ struct buffer_head *bh_result,
+ int create)
+{
+ return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
+}
+
+STATIC int
+xfs_get_blocks_direct(
+ struct inode *inode,
+ sector_t iblock,
+ struct buffer_head *bh_result,
+ int create)
+{
+ return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
+}
+
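The mapping-size clamp at the end of __xfs_get_blocks() above (report the remaining extent, but never more than the caller asked for and never more than LONG_MAX) can be sketched in isolation as follows; the function name and all numbers are hypothetical:

#include <limits.h>
#include <stdio.h>

/* Restates the b_size clamp in __xfs_get_blocks(): how much of the
 * found extent is usable from iblock onward, capped by the request. */
static long map_size(long long startoff, long long blockcount,
		     long long iblock, int blkbits, long long requested)
{
	long long size = (startoff + blockcount - iblock) << blkbits;

	if (size > requested)
		size = requested;
	if (size > LONG_MAX)
		size = LONG_MAX;
	return (long)size;
}

int main(void)
{
	/* an 8-block extent starting 2 blocks before iblock, 4k blocks,
	 * caller asked for 64k: 6 blocks remain, so 24576 bytes */
	printf("b_size = %ld\n", map_size(10, 8, 12, 12, 65536));
	return 0;
}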
+/*
+ * Complete a direct I/O write request.
+ *
+ * If the private argument is non-NULL __xfs_get_blocks signals us that we
+ * need to issue a transaction to convert the range from unwritten to written
+ * extents. In case this is regular synchronous I/O we just call xfs_end_io
+ * to do this and we are done. But in case this was a successful AIO
+ * request this handler is called from interrupt context, from which we
+ * can't start transactions. In that case offload the I/O completion to
+ * the workqueues we also use for buffered I/O completion.
+ */
+STATIC void
+xfs_end_io_direct_write(
+ struct kiocb *iocb,
+ loff_t offset,
+ ssize_t size,
+ void *private,
+ int ret,
+ bool is_async)
+{
+ struct xfs_ioend *ioend = iocb->private;
+
+ /*
+ * blockdev_direct_IO can return an error even after the I/O
+ * completion handler was called. Thus we need to protect
+ * against double-freeing.
+ */
+ iocb->private = NULL;
+
+ ioend->io_offset = offset;
+ ioend->io_size = size;
+ if (private && size > 0)
+ ioend->io_type = IO_UNWRITTEN;
+
+ if (is_async) {
+ /*
+ * If we are converting an unwritten extent we need to delay
+ * the AIO completion until after the unwritten extent
+ * conversion has completed, otherwise do it ASAP.
+ */
+ if (ioend->io_type == IO_UNWRITTEN) {
+ ioend->io_iocb = iocb;
+ ioend->io_result = ret;
+ } else {
+ aio_complete(iocb, ret, 0);
+ }
+ xfs_finish_ioend(ioend);
+ } else {
+ xfs_finish_ioend_sync(ioend);
+ }
+
+ /* XXX: probably should move into the real I/O completion handler */
+ inode_dio_done(ioend->io_inode);
+}
+
+STATIC ssize_t
+xfs_vm_direct_IO(
+ int rw,
+ struct kiocb *iocb,
+ const struct iovec *iov,
+ loff_t offset,
+ unsigned long nr_segs)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct block_device *bdev = xfs_find_bdev_for_inode(inode);
+ ssize_t ret;
+
+ if (rw & WRITE) {
+ iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
+
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
+ offset, nr_segs,
+ xfs_get_blocks_direct,
+ xfs_end_io_direct_write, NULL, 0);
+ if (ret != -EIOCBQUEUED && iocb->private)
+ xfs_destroy_ioend(iocb->private);
+ } else {
+ ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
+ offset, nr_segs,
+ xfs_get_blocks_direct,
+ NULL, NULL, 0);
+ }
+
+ return ret;
+}
+
+STATIC void
+xfs_vm_write_failed(
+ struct address_space *mapping,
+ loff_t to)
+{
+ struct inode *inode = mapping->host;
+
+ if (to > inode->i_size) {
+ /*
+ * punch out the delalloc blocks we have already allocated. We
+ * don't call xfs_setattr() to do this as we may be in the
+ * middle of a multi-iovec write and so the vfs inode->i_size
+ * will not match the xfs ip->i_size and so it will zero too
+ * much. Hence we just truncate the page cache to zero what is
+ * necessary and punch the delalloc blocks directly.
+ */
+ struct xfs_inode *ip = XFS_I(inode);
+ xfs_fileoff_t start_fsb;
+ xfs_fileoff_t end_fsb;
+ int error;
+
+ truncate_pagecache(inode, to, inode->i_size);
+
+ /*
+ * Check if there are any blocks that are outside of i_size
+ * that need to be trimmed back.
+ */ + start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1; + end_fsb = XFS_B_TO_FSB(ip->i_mount, to); + if (end_fsb <= start_fsb) + return; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + error = xfs_bmap_punch_delalloc_range(ip, start_fsb, + end_fsb - start_fsb); + if (error) { + /* something screwed, just bail */ + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { + xfs_alert(ip->i_mount, + "xfs_vm_write_failed: unable to clean up ino %lld", + ip->i_ino); + } + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } +} + +STATIC int +xfs_vm_write_begin( + struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned len, + unsigned flags, + struct page **pagep, + void **fsdata) +{ + int ret; + + ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, + pagep, xfs_get_blocks); + if (unlikely(ret)) + xfs_vm_write_failed(mapping, pos + len); + return ret; +} + +STATIC int +xfs_vm_write_end( + struct file *file, + struct address_space *mapping, + loff_t pos, + unsigned len, + unsigned copied, + struct page *page, + void *fsdata) +{ + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + if (unlikely(ret < len)) + xfs_vm_write_failed(mapping, pos + len); + return ret; +} + +STATIC sector_t +xfs_vm_bmap( + struct address_space *mapping, + sector_t block) +{ + struct inode *inode = (struct inode *)mapping->host; + struct xfs_inode *ip = XFS_I(inode); + + trace_xfs_vm_bmap(XFS_I(inode)); + xfs_ilock(ip, XFS_IOLOCK_SHARED); + xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + return generic_block_bmap(mapping, block, xfs_get_blocks); +} + +STATIC int +xfs_vm_readpage( + struct file *unused, + struct page *page) +{ + return mpage_readpage(page, xfs_get_blocks); +} + +STATIC int +xfs_vm_readpages( + struct file *unused, + struct address_space *mapping, + struct list_head *pages, + unsigned nr_pages) +{ + return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); +} + +const struct address_space_operations xfs_address_space_operations = { + .readpage = xfs_vm_readpage, + .readpages = xfs_vm_readpages, + .writepage = xfs_vm_writepage, + .writepages = xfs_vm_writepages, + .releasepage = xfs_vm_releasepage, + .invalidatepage = xfs_vm_invalidatepage, + .write_begin = xfs_vm_write_begin, + .write_end = xfs_vm_write_end, + .bmap = xfs_vm_bmap, + .direct_IO = xfs_vm_direct_IO, + .migratepage = buffer_migrate_page, + .is_partially_uptodate = block_is_partially_uptodate, + .error_remove_page = generic_error_remove_page, +}; diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h new file mode 100644 index 0000000..71f721e --- /dev/null +++ b/fs/xfs/xfs_aops.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2005-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_AOPS_H__ +#define __XFS_AOPS_H__ + +extern struct workqueue_struct *xfsdatad_workqueue; +extern struct workqueue_struct *xfsconvertd_workqueue; +extern mempool_t *xfs_ioend_pool; + +/* + * Types of I/O for bmap clustering and I/O completion tracking. + */ +enum { + IO_DIRECT = 0, /* special case for direct I/O ioends */ + IO_DELALLOC, /* mapping covers delalloc region */ + IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ + IO_OVERWRITE, /* mapping covers already allocated extent */ +}; + +#define XFS_IO_TYPES \ + { 0, "" }, \ + { IO_DELALLOC, "delalloc" }, \ + { IO_UNWRITTEN, "unwritten" }, \ + { IO_OVERWRITE, "overwrite" } + +/* + * xfs_ioend struct manages large extent writes for XFS. + * It can manage several multi-page bio's at once. + */ +typedef struct xfs_ioend { + struct xfs_ioend *io_list; /* next ioend in chain */ + unsigned int io_type; /* delalloc / unwritten */ + int io_error; /* I/O error code */ + atomic_t io_remaining; /* hold count */ + struct inode *io_inode; /* file being written to */ + struct buffer_head *io_buffer_head;/* buffer linked list head */ + struct buffer_head *io_buffer_tail;/* buffer linked list tail */ + size_t io_size; /* size of the extent */ + xfs_off_t io_offset; /* offset in the file */ + struct work_struct io_work; /* xfsdatad work queue */ + struct kiocb *io_iocb; + int io_result; +} xfs_ioend_t; + +extern const struct address_space_operations xfs_address_space_operations; +extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int); + +extern void xfs_ioend_init(void); +extern void xfs_ioend_wait(struct xfs_inode *); + +extern void xfs_count_page_state(struct page *, int *, int *); + +#endif /* __XFS_AOPS_H__ */ diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c new file mode 100644 index 0000000..c57836d --- /dev/null +++ b/fs/xfs/xfs_buf.c @@ -0,0 +1,1876 @@ +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/bio.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/percpu.h>
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+#include <linux/kthread.h>
+#include <linux/migrate.h>
+#include <linux/backing-dev.h>
+#include <linux/freezer.h>
+
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_trace.h"
+
+static kmem_zone_t *xfs_buf_zone;
+STATIC int xfsbufd(void *);
+STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
+
+static struct workqueue_struct *xfslogd_workqueue;
+struct workqueue_struct *xfsdatad_workqueue;
+struct workqueue_struct *xfsconvertd_workqueue;
+
+#ifdef XFS_BUF_LOCK_TRACKING
+# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
+# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
+# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
+#else
+# define XB_SET_OWNER(bp) do { } while (0)
+# define XB_CLEAR_OWNER(bp) do { } while (0)
+# define XB_GET_OWNER(bp) do { } while (0)
+#endif
+
+#define xb_to_gfp(flags) \
+ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
+ ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
+
+#define xb_to_km(flags) \
+ (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+
+#define xfs_buf_allocate(flags) \
+ kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
+#define xfs_buf_deallocate(bp) \
+ kmem_zone_free(xfs_buf_zone, (bp));
+
+static inline int
+xfs_buf_is_vmapped(
+ struct xfs_buf *bp)
+{
+ /*
+ * Return true if the buffer is vmapped.
+ *
+ * The XBF_MAPPED flag is set if the buffer should be mapped, but the
+ * code is clever enough to know it doesn't have to map a single page,
+ * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+ */
+ return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+}
+
+static inline int
+xfs_buf_vmap_len(
+ struct xfs_buf *bp)
+{
+ return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
+}
+
+/*
+ * xfs_buf_lru_add - add a buffer to the LRU.
+ *
+ * The LRU takes a new reference to the buffer so that it will only be freed
+ * once the shrinker takes the buffer off the LRU.
+ */
+STATIC void
+xfs_buf_lru_add(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+
+ spin_lock(&btp->bt_lru_lock);
+ if (list_empty(&bp->b_lru)) {
+ atomic_inc(&bp->b_hold);
+ list_add_tail(&bp->b_lru, &btp->bt_lru);
+ btp->bt_lru_nr++;
+ }
+ spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * xfs_buf_lru_del - remove a buffer from the LRU
+ *
+ * The unlocked check is safe here because it only occurs when there are not
+ * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
+ * to optimise the shrinker removing the buffer from the LRU and calling
+ * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
+ * bt_lru_lock.
+ */
+STATIC void
+xfs_buf_lru_del(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+
+ if (list_empty(&bp->b_lru))
+ return;
+
+ spin_lock(&btp->bt_lru_lock);
+ if (!list_empty(&bp->b_lru)) {
+ list_del_init(&bp->b_lru);
+ btp->bt_lru_nr--;
+ }
+ spin_unlock(&btp->bt_lru_lock);
+}
+
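The LRU scheme above can be illustrated with a much simpler stand-alone model: adding to the LRU takes a reference, and removing a stale buffer drops it again (in the real code that drop happens in xfs_buf_stale() and xfs_buf_rele(), not in xfs_buf_lru_del() itself). The types below are hypothetical simplifications, not the xfs_buf structures:

#include <stdio.h>

struct buf {
	int hold;	/* reference count */
	int on_lru;
};

/* Adding to the LRU takes its own reference, so the buffer cannot
 * be freed while the list still points at it. */
static void lru_add(struct buf *bp)
{
	if (!bp->on_lru) {
		bp->hold++;
		bp->on_lru = 1;
	}
}

/* Removing a stale buffer gives the LRU's reference back. */
static void lru_remove_stale(struct buf *bp)
{
	if (bp->on_lru) {
		bp->on_lru = 0;
		bp->hold--;
	}
}

int main(void)
{
	struct buf b = { 1, 0 };	/* caller's own reference */

	lru_add(&b);
	printf("hold after lru_add: %d\n", b.hold);		/* 2 */
	lru_remove_stale(&b);
	printf("hold after stale removal: %d\n", b.hold);	/* 1 */
	return 0;
}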
+/*
+ * When we mark a buffer stale, we remove the buffer from the LRU and clear the
+ * b_lru_ref count so that the buffer is freed immediately when the buffer
+ * reference count falls to zero. If the buffer is already on the LRU, we need
+ * to remove the reference that LRU holds on the buffer.
+ *
+ * This prevents build-up of stale buffers on the LRU.
+ */
+void
+xfs_buf_stale(
+ struct xfs_buf *bp)
+{
+ bp->b_flags |= XBF_STALE;
+ atomic_set(&(bp)->b_lru_ref, 0);
+ if (!list_empty(&bp->b_lru)) {
+ struct xfs_buftarg *btp = bp->b_target;
+
+ spin_lock(&btp->bt_lru_lock);
+ if (!list_empty(&bp->b_lru)) {
+ list_del_init(&bp->b_lru);
+ btp->bt_lru_nr--;
+ atomic_dec(&bp->b_hold);
+ }
+ spin_unlock(&btp->bt_lru_lock);
+ }
+ ASSERT(atomic_read(&bp->b_hold) >= 1);
+}
+
+STATIC void
+_xfs_buf_initialize(
+ xfs_buf_t *bp,
+ xfs_buftarg_t *target,
+ xfs_off_t range_base,
+ size_t range_length,
+ xfs_buf_flags_t flags)
+{
+ /*
+ * We don't want certain flags to appear in b_flags.
+ */
+ flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+
+ memset(bp, 0, sizeof(xfs_buf_t));
+ atomic_set(&bp->b_hold, 1);
+ atomic_set(&bp->b_lru_ref, 1);
+ init_completion(&bp->b_iowait);
+ INIT_LIST_HEAD(&bp->b_lru);
+ INIT_LIST_HEAD(&bp->b_list);
+ RB_CLEAR_NODE(&bp->b_rbnode);
+ sema_init(&bp->b_sema, 0); /* held, no waiters */
+ XB_SET_OWNER(bp);
+ bp->b_target = target;
+ bp->b_file_offset = range_base;
+ /*
+ * Set buffer_length and count_desired to the same value initially.
+ * I/O routines should use count_desired, which will be the same in
+ * most cases but may be reset (e.g. XFS recovery).
+ */
+ bp->b_buffer_length = bp->b_count_desired = range_length;
+ bp->b_flags = flags;
+ bp->b_bn = XFS_BUF_DADDR_NULL;
+ atomic_set(&bp->b_pin_count, 0);
+ init_waitqueue_head(&bp->b_waiters);
+
+ XFS_STATS_INC(xb_create);
+
+ trace_xfs_buf_init(bp, _RET_IP_);
+}
+
+/*
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
+ */
+STATIC int
+_xfs_buf_get_pages(
+ xfs_buf_t *bp,
+ int page_count,
+ xfs_buf_flags_t flags)
+{
+ /* Make sure that we have a page list */
+ if (bp->b_pages == NULL) {
+ bp->b_offset = xfs_buf_poff(bp->b_file_offset);
+ bp->b_page_count = page_count;
+ if (page_count <= XB_PAGES) {
+ bp->b_pages = bp->b_page_array;
+ } else {
+ bp->b_pages = kmem_alloc(sizeof(struct page *) *
+ page_count, xb_to_km(flags));
+ if (bp->b_pages == NULL)
+ return -ENOMEM;
+ }
+ memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
+ }
+ return 0;
+}
+
+/*
+ * Frees b_pages if it was allocated.
+ */
+STATIC void
+_xfs_buf_free_pages(
+ xfs_buf_t *bp)
+{
+ if (bp->b_pages != bp->b_page_array) {
+ kmem_free(bp->b_pages);
+ bp->b_pages = NULL;
+ }
+}
+
+/*
+ * Releases the specified buffer.
+ *
+ * The modification state of any associated pages is left unchanged.
+ * The buffer must not be on any hash - use xfs_buf_rele instead for
+ * hashed and refcounted buffers
+ */
+void
+xfs_buf_free(
+ xfs_buf_t *bp)
+{
+ trace_xfs_buf_free(bp, _RET_IP_);
+
+ ASSERT(list_empty(&bp->b_lru));
+
+ if (bp->b_flags & _XBF_PAGES) {
+ uint i;
+
+ if (xfs_buf_is_vmapped(bp))
+ vm_unmap_ram(bp->b_addr - bp->b_offset,
+ bp->b_page_count);
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ struct page *page = bp->b_pages[i];
+
+ __free_page(page);
+ }
+ } else if (bp->b_flags & _XBF_KMEM)
+ kmem_free(bp->b_addr);
+ _xfs_buf_free_pages(bp);
+ xfs_buf_deallocate(bp);
+}
+
+/*
+ * Allocates all the pages for the buffer in question and builds its page list.
+ */ +STATIC int +xfs_buf_allocate_memory( + xfs_buf_t *bp, + uint flags) +{ + size_t size = bp->b_count_desired; + size_t nbytes, offset; + gfp_t gfp_mask = xb_to_gfp(flags); + unsigned short page_count, i; + xfs_off_t end; + int error; + + /* + * for buffers that are contained within a single page, just allocate + * the memory from the heap - there's no need for the complexity of + * page arrays to keep allocation down to order 0. + */ + if (bp->b_buffer_length < PAGE_SIZE) { + bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags)); + if (!bp->b_addr) { + /* low memory - use alloc_page loop instead */ + goto use_alloc_page; + } + + if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) & + PAGE_MASK) != + ((unsigned long)bp->b_addr & PAGE_MASK)) { + /* b_addr spans two pages - use alloc_page instead */ + kmem_free(bp->b_addr); + bp->b_addr = NULL; + goto use_alloc_page; + } + bp->b_offset = offset_in_page(bp->b_addr); + bp->b_pages = bp->b_page_array; + bp->b_pages[0] = virt_to_page(bp->b_addr); + bp->b_page_count = 1; + bp->b_flags |= XBF_MAPPED | _XBF_KMEM; + return 0; + } + +use_alloc_page: + end = bp->b_file_offset + bp->b_buffer_length; + page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset); + error = _xfs_buf_get_pages(bp, page_count, flags); + if (unlikely(error)) + return error; + + offset = bp->b_offset; + bp->b_flags |= _XBF_PAGES; + + for (i = 0; i < bp->b_page_count; i++) { + struct page *page; + uint retries = 0; +retry: + page = alloc_page(gfp_mask); + if (unlikely(page == NULL)) { + if (flags & XBF_READ_AHEAD) { + bp->b_page_count = i; + error = ENOMEM; + goto out_free_pages; + } + + /* + * This could deadlock. + * + * But until all the XFS lowlevel code is revamped to + * handle buffer allocation failures we can't do much. + */ + if (!(++retries % 100)) + xfs_err(NULL, + "possible memory allocation deadlock in %s (mode:0x%x)", + __func__, gfp_mask); + + XFS_STATS_INC(xb_page_retries); + congestion_wait(BLK_RW_ASYNC, HZ/50); + goto retry; + } + + XFS_STATS_INC(xb_page_found); + + nbytes = min_t(size_t, size, PAGE_SIZE - offset); + size -= nbytes; + bp->b_pages[i] = page; + offset = 0; + } + return 0; + +out_free_pages: + for (i = 0; i < bp->b_page_count; i++) + __free_page(bp->b_pages[i]); + return error; +} + +/* + * Map buffer into kernel address-space if necessary. + */ +STATIC int +_xfs_buf_map_pages( + xfs_buf_t *bp, + uint flags) +{ + ASSERT(bp->b_flags & _XBF_PAGES); + if (bp->b_page_count == 1) { + /* A single page buffer is always mappable */ + bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; + bp->b_flags |= XBF_MAPPED; + } else if (flags & XBF_MAPPED) { + int retried = 0; + + do { + bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, + -1, PAGE_KERNEL); + if (bp->b_addr) + break; + vm_unmap_aliases(); + } while (retried++ <= 1); + + if (!bp->b_addr) + return -ENOMEM; + bp->b_addr += bp->b_offset; + bp->b_flags |= XBF_MAPPED; + } + + return 0; +} + +/* + * Finding and Reading Buffers + */ + +/* + * Look up, and creates if absent, a lockable buffer for + * a given range of an inode. The buffer is returned + * locked. If other overlapping buffers exist, they are + * released before the new buffer is created and locked, + * which may imply that this call will block until those buffers + * are unlocked. No I/O is implied by this call. 
+ */ +xfs_buf_t * +_xfs_buf_find( + xfs_buftarg_t *btp, /* block device target */ + xfs_off_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + xfs_buf_flags_t flags, + xfs_buf_t *new_bp) +{ + xfs_off_t range_base; + size_t range_length; + struct xfs_perag *pag; + struct rb_node **rbp; + struct rb_node *parent; + xfs_buf_t *bp; + + range_base = (ioff << BBSHIFT); + range_length = (isize << BBSHIFT); + + /* Check for IOs smaller than the sector size / not sector aligned */ + ASSERT(!(range_length < (1 << btp->bt_sshift))); + ASSERT(!(range_base & (xfs_off_t)btp->bt_smask)); + + /* get tree root */ + pag = xfs_perag_get(btp->bt_mount, + xfs_daddr_to_agno(btp->bt_mount, ioff)); + + /* walk tree */ + spin_lock(&pag->pag_buf_lock); + rbp = &pag->pag_buf_tree.rb_node; + parent = NULL; + bp = NULL; + while (*rbp) { + parent = *rbp; + bp = rb_entry(parent, struct xfs_buf, b_rbnode); + + if (range_base < bp->b_file_offset) + rbp = &(*rbp)->rb_left; + else if (range_base > bp->b_file_offset) + rbp = &(*rbp)->rb_right; + else { + /* + * found a block offset match. If the range doesn't + * match, the only way this is allowed is if the buffer + * in the cache is stale and the transaction that made + * it stale has not yet committed. i.e. we are + * reallocating a busy extent. Skip this buffer and + * continue searching to the right for an exact match. + */ + if (bp->b_buffer_length != range_length) { + ASSERT(bp->b_flags & XBF_STALE); + rbp = &(*rbp)->rb_right; + continue; + } + atomic_inc(&bp->b_hold); + goto found; + } + } + + /* No match found */ + if (new_bp) { + _xfs_buf_initialize(new_bp, btp, range_base, + range_length, flags); + rb_link_node(&new_bp->b_rbnode, parent, rbp); + rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree); + /* the buffer keeps the perag reference until it is freed */ + new_bp->b_pag = pag; + spin_unlock(&pag->pag_buf_lock); + } else { + XFS_STATS_INC(xb_miss_locked); + spin_unlock(&pag->pag_buf_lock); + xfs_perag_put(pag); + } + return new_bp; + +found: + spin_unlock(&pag->pag_buf_lock); + xfs_perag_put(pag); + + if (!xfs_buf_trylock(bp)) { + if (flags & XBF_TRYLOCK) { + xfs_buf_rele(bp); + XFS_STATS_INC(xb_busy_locked); + return NULL; + } + xfs_buf_lock(bp); + XFS_STATS_INC(xb_get_locked_waited); + } + + /* + * if the buffer is stale, clear all the external state associated with + * it. We need to keep flags such as how we allocated the buffer memory + * intact here. + */ + if (bp->b_flags & XBF_STALE) { + ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); + bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES; + } + + trace_xfs_buf_find(bp, flags, _RET_IP_); + XFS_STATS_INC(xb_get_locked); + return bp; +} + +/* + * Assembles a buffer covering the specified range. + * Storage in memory for all portions of the buffer will be allocated, + * although backing storage may not be. 
+ */ +xfs_buf_t * +xfs_buf_get( + xfs_buftarg_t *target,/* target for buffer */ + xfs_off_t ioff, /* starting offset of range */ + size_t isize, /* length of range */ + xfs_buf_flags_t flags) +{ + xfs_buf_t *bp, *new_bp; + int error = 0; + + new_bp = xfs_buf_allocate(flags); + if (unlikely(!new_bp)) + return NULL; + + bp = _xfs_buf_find(target, ioff, isize, flags, new_bp); + if (bp == new_bp) { + error = xfs_buf_allocate_memory(bp, flags); + if (error) + goto no_buffer; + } else { + xfs_buf_deallocate(new_bp); + if (unlikely(bp == NULL)) + return NULL; + } + + if (!(bp->b_flags & XBF_MAPPED)) { + error = _xfs_buf_map_pages(bp, flags); + if (unlikely(error)) { + xfs_warn(target->bt_mount, + "%s: failed to map pages\n", __func__); + goto no_buffer; + } + } + + XFS_STATS_INC(xb_get); + + /* + * Always fill in the block number now, the mapped cases can do + * their own overlay of this later. + */ + bp->b_bn = ioff; + bp->b_count_desired = bp->b_buffer_length; + + trace_xfs_buf_get(bp, flags, _RET_IP_); + return bp; + + no_buffer: + if (flags & (XBF_LOCK | XBF_TRYLOCK)) + xfs_buf_unlock(bp); + xfs_buf_rele(bp); + return NULL; +} + +STATIC int +_xfs_buf_read( + xfs_buf_t *bp, + xfs_buf_flags_t flags) +{ + int status; + + ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); + ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); + + bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD); + bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); + + status = xfs_buf_iorequest(bp); + if (status || bp->b_error || (flags & XBF_ASYNC)) + return status; + return xfs_buf_iowait(bp); +} + +xfs_buf_t * +xfs_buf_read( + xfs_buftarg_t *target, + xfs_off_t ioff, + size_t isize, + xfs_buf_flags_t flags) +{ + xfs_buf_t *bp; + + flags |= XBF_READ; + + bp = xfs_buf_get(target, ioff, isize, flags); + if (bp) { + trace_xfs_buf_read(bp, flags, _RET_IP_); + + if (!XFS_BUF_ISDONE(bp)) { + XFS_STATS_INC(xb_get_read); + _xfs_buf_read(bp, flags); + } else if (flags & XBF_ASYNC) { + /* + * Read ahead call which is already satisfied, + * drop the buffer + */ + goto no_buffer; + } else { + /* We do not want read in the flags */ + bp->b_flags &= ~XBF_READ; + } + } + + return bp; + + no_buffer: + if (flags & (XBF_LOCK | XBF_TRYLOCK)) + xfs_buf_unlock(bp); + xfs_buf_rele(bp); + return NULL; +} + +/* + * If we are not low on memory then do the readahead in a deadlock + * safe manner. + */ +void +xfs_buf_readahead( + xfs_buftarg_t *target, + xfs_off_t ioff, + size_t isize) +{ + if (bdi_read_congested(target->bt_bdi)) + return; + + xfs_buf_read(target, ioff, isize, + XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK); +} + +/* + * Read an uncached buffer from disk. Allocates and returns a locked + * buffer containing the disk contents or nothing. 
+ */
+struct xfs_buf *
+xfs_buf_read_uncached(
+ struct xfs_mount *mp,
+ struct xfs_buftarg *target,
+ xfs_daddr_t daddr,
+ size_t length,
+ int flags)
+{
+ xfs_buf_t *bp;
+ int error;
+
+ bp = xfs_buf_get_uncached(target, length, flags);
+ if (!bp)
+ return NULL;
+
+ /* set up the buffer for a read IO */
+ XFS_BUF_SET_ADDR(bp, daddr);
+ XFS_BUF_READ(bp);
+
+ xfsbdstrat(mp, bp);
+ error = xfs_buf_iowait(bp);
+ if (error || bp->b_error) {
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+ return bp;
+}
+
+xfs_buf_t *
+xfs_buf_get_empty(
+ size_t len,
+ xfs_buftarg_t *target)
+{
+ xfs_buf_t *bp;
+
+ bp = xfs_buf_allocate(0);
+ if (bp)
+ _xfs_buf_initialize(bp, target, 0, len, 0);
+ return bp;
+}
+
+/*
+ * Return a buffer allocated as an empty buffer and associated with external
+ * memory via xfs_buf_associate_memory() back to its empty state.
+ */
+void
+xfs_buf_set_empty(
+ struct xfs_buf *bp,
+ size_t len)
+{
+ if (bp->b_pages)
+ _xfs_buf_free_pages(bp);
+
+ bp->b_pages = NULL;
+ bp->b_page_count = 0;
+ bp->b_addr = NULL;
+ bp->b_file_offset = 0;
+ bp->b_buffer_length = bp->b_count_desired = len;
+ bp->b_bn = XFS_BUF_DADDR_NULL;
+ bp->b_flags &= ~XBF_MAPPED;
+}
+
+static inline struct page *
+mem_to_page(
+ void *addr)
+{
+ if ((!is_vmalloc_addr(addr))) {
+ return virt_to_page(addr);
+ } else {
+ return vmalloc_to_page(addr);
+ }
+}
+
+int
+xfs_buf_associate_memory(
+ xfs_buf_t *bp,
+ void *mem,
+ size_t len)
+{
+ int rval;
+ int i = 0;
+ unsigned long pageaddr;
+ unsigned long offset;
+ size_t buflen;
+ int page_count;
+
+ pageaddr = (unsigned long)mem & PAGE_MASK;
+ offset = (unsigned long)mem - pageaddr;
+ buflen = PAGE_ALIGN(len + offset);
+ page_count = buflen >> PAGE_SHIFT;
+
+ /* Free any previous set of page pointers */
+ if (bp->b_pages)
+ _xfs_buf_free_pages(bp);
+
+ bp->b_pages = NULL;
+ bp->b_addr = mem;
+
+ rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
+ if (rval)
+ return rval;
+
+ bp->b_offset = offset;
+
+ for (i = 0; i < bp->b_page_count; i++) {
+ bp->b_pages[i] = mem_to_page((void *)pageaddr);
+ pageaddr += PAGE_SIZE;
+ }
+
+ bp->b_count_desired = len;
+ bp->b_buffer_length = buflen;
+ bp->b_flags |= XBF_MAPPED;
+
+ return 0;
+}
+
+xfs_buf_t *
+xfs_buf_get_uncached(
+ struct xfs_buftarg *target,
+ size_t len,
+ int flags)
+{
+ unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ int error, i;
+ xfs_buf_t *bp;
+
+ bp = xfs_buf_allocate(0);
+ if (unlikely(bp == NULL))
+ goto fail;
+ _xfs_buf_initialize(bp, target, 0, len, 0);
+
+ error = _xfs_buf_get_pages(bp, page_count, 0);
+ if (error)
+ goto fail_free_buf;
+
+ for (i = 0; i < page_count; i++) {
+ bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
+ if (!bp->b_pages[i])
+ goto fail_free_mem;
+ }
+ bp->b_flags |= _XBF_PAGES;
+
+ error = _xfs_buf_map_pages(bp, XBF_MAPPED);
+ if (unlikely(error)) {
+ xfs_warn(target->bt_mount,
+ "%s: failed to map pages\n", __func__);
+ goto fail_free_mem;
+ }
+
+ trace_xfs_buf_get_uncached(bp, _RET_IP_);
+ return bp;
+
+ fail_free_mem:
+ while (--i >= 0)
+ __free_page(bp->b_pages[i]);
+ _xfs_buf_free_pages(bp);
+ fail_free_buf:
+ xfs_buf_deallocate(bp);
+ fail:
+ return NULL;
+}
+
+/*
+ * Increment reference count on buffer, to hold the buffer concurrently
+ * with another thread which may release (free) the buffer asynchronously.
+ * Must hold the buffer already to call this function.
+ */
+void
+xfs_buf_hold(
+ xfs_buf_t *bp)
+{
+ trace_xfs_buf_hold(bp, _RET_IP_);
+ atomic_inc(&bp->b_hold);
+}
+
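xfs_buf_hold() above and xfs_buf_rele() below form the usual take-reference / drop-reference-and-free-on-zero pair; the cached case additionally folds the final drop under the per-AG lock via atomic_dec_and_lock(). A minimal user-space sketch of the uncached half using C11 atomics, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	atomic_int hold;
};

static struct buf *buf_alloc(void)
{
	struct buf *bp = malloc(sizeof(*bp));

	atomic_init(&bp->hold, 1);
	return bp;
}

/* Drop one reference; whoever drops the count to zero frees. */
static void buf_rele(struct buf *bp)
{
	if (atomic_fetch_sub(&bp->hold, 1) == 1) {
		printf("last hold dropped, freeing\n");
		free(bp);
	}
}

int main(void)
{
	struct buf *bp = buf_alloc();

	atomic_fetch_add(&bp->hold, 1);	/* second holder */
	buf_rele(bp);			/* still held */
	buf_rele(bp);			/* freed here */
	return 0;
}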
+/*
+ * Releases a hold on the specified buffer. If the
+ * hold count is 1, calls xfs_buf_free.
+ */
+void
+xfs_buf_rele(
+ xfs_buf_t *bp)
+{
+ struct xfs_perag *pag = bp->b_pag;
+
+ trace_xfs_buf_rele(bp, _RET_IP_);
+
+ if (!pag) {
+ ASSERT(list_empty(&bp->b_lru));
+ ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
+ if (atomic_dec_and_test(&bp->b_hold))
+ xfs_buf_free(bp);
+ return;
+ }
+
+ ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
+
+ ASSERT(atomic_read(&bp->b_hold) > 0);
+ if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
+ if (!(bp->b_flags & XBF_STALE) &&
+ atomic_read(&bp->b_lru_ref)) {
+ xfs_buf_lru_add(bp);
+ spin_unlock(&pag->pag_buf_lock);
+ } else {
+ xfs_buf_lru_del(bp);
+ ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+ rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
+ xfs_buf_free(bp);
+ }
+ }
+}
+
+
+/*
+ * Lock a buffer object, if it is not already locked.
+ *
+ * If we come across a stale, pinned, locked buffer, we know that we are
+ * being asked to lock a buffer that has been reallocated. Because it is
+ * pinned, we know that the log has not been pushed to disk and hence it
+ * will still be locked. Rather than continuing to have trylock attempts
+ * fail until someone else pushes the log, push it ourselves before
+ * returning. This means that the xfsaild will not get stuck trying
+ * to push on stale inode buffers.
+ */
+int
+xfs_buf_trylock(
+ struct xfs_buf *bp)
+{
+ int locked;
+
+ locked = down_trylock(&bp->b_sema) == 0;
+ if (locked)
+ XB_SET_OWNER(bp);
+ else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+ xfs_log_force(bp->b_target->bt_mount, 0);
+
+ trace_xfs_buf_trylock(bp, _RET_IP_);
+ return locked;
+}
+
+/*
+ * Lock a buffer object.
+ *
+ * If we come across a stale, pinned, locked buffer, we know that we
+ * are being asked to lock a buffer that has been reallocated. Because
+ * it is pinned, we know that the log has not been pushed to disk and
+ * hence it will still be locked. Rather than sleeping until someone
+ * else pushes the log, push it ourselves before trying to get the lock.
+ */
+void
+xfs_buf_lock(
+ struct xfs_buf *bp)
+{
+ trace_xfs_buf_lock(bp, _RET_IP_);
+
+ if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
+ xfs_log_force(bp->b_target->bt_mount, 0);
+ down(&bp->b_sema);
+ XB_SET_OWNER(bp);
+
+ trace_xfs_buf_lock_done(bp, _RET_IP_);
+}
+
+/*
+ * Releases the lock on the buffer object.
+ * If the buffer is marked delwri but is not queued, do so before we
+ * unlock the buffer as we need to set flags correctly. We also need to
+ * take a reference for the delwri queue because the unlocker is going to
+ * drop theirs and they don't know we just queued it.
+ */ +void +xfs_buf_unlock( + struct xfs_buf *bp) +{ + if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) { + atomic_inc(&bp->b_hold); + bp->b_flags |= XBF_ASYNC; + xfs_buf_delwri_queue(bp, 0); + } + + XB_CLEAR_OWNER(bp); + up(&bp->b_sema); + + trace_xfs_buf_unlock(bp, _RET_IP_); +} + +STATIC void +xfs_buf_wait_unpin( + xfs_buf_t *bp) +{ + DECLARE_WAITQUEUE (wait, current); + + if (atomic_read(&bp->b_pin_count) == 0) + return; + + add_wait_queue(&bp->b_waiters, &wait); + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (atomic_read(&bp->b_pin_count) == 0) + break; + io_schedule(); + } + remove_wait_queue(&bp->b_waiters, &wait); + set_current_state(TASK_RUNNING); +} + +/* + * Buffer Utility Routines + */ + +STATIC void +xfs_buf_iodone_work( + struct work_struct *work) +{ + xfs_buf_t *bp = + container_of(work, xfs_buf_t, b_iodone_work); + + if (bp->b_iodone) + (*(bp->b_iodone))(bp); + else if (bp->b_flags & XBF_ASYNC) + xfs_buf_relse(bp); +} + +void +xfs_buf_ioend( + xfs_buf_t *bp, + int schedule) +{ + trace_xfs_buf_iodone(bp, _RET_IP_); + + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); + if (bp->b_error == 0) + bp->b_flags |= XBF_DONE; + + if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { + if (schedule) { + INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); + queue_work(xfslogd_workqueue, &bp->b_iodone_work); + } else { + xfs_buf_iodone_work(&bp->b_iodone_work); + } + } else { + complete(&bp->b_iowait); + } +} + +void +xfs_buf_ioerror( + xfs_buf_t *bp, + int error) +{ + ASSERT(error >= 0 && error <= 0xffff); + bp->b_error = (unsigned short)error; + trace_xfs_buf_ioerror(bp, error, _RET_IP_); +} + +int +xfs_bwrite( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + int error; + + bp->b_flags |= XBF_WRITE; + bp->b_flags &= ~(XBF_ASYNC | XBF_READ); + + xfs_buf_delwri_dequeue(bp); + xfs_bdstrat_cb(bp); + + error = xfs_buf_iowait(bp); + if (error) + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + xfs_buf_relse(bp); + return error; +} + +void +xfs_bdwrite( + void *mp, + struct xfs_buf *bp) +{ + trace_xfs_buf_bdwrite(bp, _RET_IP_); + + bp->b_flags &= ~XBF_READ; + bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); + + xfs_buf_delwri_queue(bp, 1); +} + +/* + * Called when we want to stop a buffer from getting written or read. + * We attach the EIO error, muck with its flags, and call xfs_buf_ioend + * so that the proper iodone callbacks get called. + */ +STATIC int +xfs_bioerror( + xfs_buf_t *bp) +{ +#ifdef XFSERRORDEBUG + ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); +#endif + + /* + * No need to wait until the buffer is unpinned, we aren't flushing it. + */ + xfs_buf_ioerror(bp, EIO); + + /* + * We're calling xfs_buf_ioend, so delete XBF_DONE flag. + */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_UNDONE(bp); + XFS_BUF_STALE(bp); + + xfs_buf_ioend(bp, 0); + + return EIO; +} + +/* + * Same as xfs_bioerror, except that we are releasing the buffer + * here ourselves, and avoiding the xfs_buf_ioend call. + * This is meant for userdata errors; metadata bufs come with + * iodone functions attached, so that we can track down errors. + */ +STATIC int +xfs_bioerror_relse( + struct xfs_buf *bp) +{ + int64_t fl = bp->b_flags; + /* + * No need to wait until the buffer is unpinned. + * We aren't flushing it. + * + * chunkhold expects B_DONE to be set, whether + * we actually finish the I/O or not. We don't want to + * change that interface. 
+ */ + XFS_BUF_UNREAD(bp); + XFS_BUF_UNDELAYWRITE(bp); + XFS_BUF_DONE(bp); + XFS_BUF_STALE(bp); + bp->b_iodone = NULL; + if (!(fl & XBF_ASYNC)) { + /* + * Mark b_error and B_ERROR _both_. + * Lot's of chunkcache code assumes that. + * There's no reason to mark error for + * ASYNC buffers. + */ + xfs_buf_ioerror(bp, EIO); + XFS_BUF_FINISH_IOWAIT(bp); + } else { + xfs_buf_relse(bp); + } + + return EIO; +} + + +/* + * All xfs metadata buffers except log state machine buffers + * get this attached as their b_bdstrat callback function. + * This is so that we can catch a buffer + * after prematurely unpinning it to forcibly shutdown the filesystem. + */ +int +xfs_bdstrat_cb( + struct xfs_buf *bp) +{ + if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { + trace_xfs_bdstrat_shut(bp, _RET_IP_); + /* + * Metadata write that didn't get logged but + * written delayed anyway. These aren't associated + * with a transaction, and can be ignored. + */ + if (!bp->b_iodone && !XFS_BUF_ISREAD(bp)) + return xfs_bioerror_relse(bp); + else + return xfs_bioerror(bp); + } + + xfs_buf_iorequest(bp); + return 0; +} + +/* + * Wrapper around bdstrat so that we can stop data from going to disk in case + * we are shutting down the filesystem. Typically user data goes thru this + * path; one of the exceptions is the superblock. + */ +void +xfsbdstrat( + struct xfs_mount *mp, + struct xfs_buf *bp) +{ + if (XFS_FORCED_SHUTDOWN(mp)) { + trace_xfs_bdstrat_shut(bp, _RET_IP_); + xfs_bioerror_relse(bp); + return; + } + + xfs_buf_iorequest(bp); +} + +STATIC void +_xfs_buf_ioend( + xfs_buf_t *bp, + int schedule) +{ + if (atomic_dec_and_test(&bp->b_io_remaining) == 1) + xfs_buf_ioend(bp, schedule); +} + +STATIC void +xfs_buf_bio_end_io( + struct bio *bio, + int error) +{ + xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; + + xfs_buf_ioerror(bp, -error); + + if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) + invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); + + _xfs_buf_ioend(bp, 1); + bio_put(bio); +} + +STATIC void +_xfs_buf_ioapply( + xfs_buf_t *bp) +{ + int rw, map_i, total_nr_pages, nr_pages; + struct bio *bio; + int offset = bp->b_offset; + int size = bp->b_count_desired; + sector_t sector = bp->b_bn; + + total_nr_pages = bp->b_page_count; + map_i = 0; + + if (bp->b_flags & XBF_WRITE) { + if (bp->b_flags & XBF_SYNCIO) + rw = WRITE_SYNC; + else + rw = WRITE; + if (bp->b_flags & XBF_FUA) + rw |= REQ_FUA; + if (bp->b_flags & XBF_FLUSH) + rw |= REQ_FLUSH; + } else if (bp->b_flags & XBF_READ_AHEAD) { + rw = READA; + } else { + rw = READ; + } + + /* we only use the buffer cache for meta-data */ + rw |= REQ_META; + +next_chunk: + atomic_inc(&bp->b_io_remaining); + nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); + if (nr_pages > total_nr_pages) + nr_pages = total_nr_pages; + + bio = bio_alloc(GFP_NOIO, nr_pages); + bio->bi_bdev = bp->b_target->bt_bdev; + bio->bi_sector = sector; + bio->bi_end_io = xfs_buf_bio_end_io; + bio->bi_private = bp; + + + for (; size && nr_pages; nr_pages--, map_i++) { + int rbytes, nbytes = PAGE_SIZE - offset; + + if (nbytes > size) + nbytes = size; + + rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset); + if (rbytes < nbytes) + break; + + offset = 0; + sector += nbytes >> BBSHIFT; + size -= nbytes; + total_nr_pages--; + } + + if (likely(bio->bi_size)) { + if (xfs_buf_is_vmapped(bp)) { + flush_kernel_vmap_range(bp->b_addr, + xfs_buf_vmap_len(bp)); + } + submit_bio(rw, bio); + if (size) + goto next_chunk; + } else { + xfs_buf_ioerror(bp, EIO); + bio_put(bio); + } 
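+ + /* + * Note: bio_add_page() may take fewer bytes than asked for once the + * bio hits a device limit; any residual size then sends us back to + * next_chunk above to issue another bio for the remainder. + */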
+} + +int +xfs_buf_iorequest( + xfs_buf_t *bp) +{ + trace_xfs_buf_iorequest(bp, _RET_IP_); + + if (bp->b_flags & XBF_DELWRI) { + xfs_buf_delwri_queue(bp, 1); + return 0; + } + + if (bp->b_flags & XBF_WRITE) { + xfs_buf_wait_unpin(bp); + } + + xfs_buf_hold(bp); + + /* Set the count to 1 initially, this will stop an I/O + * completion callout which happens before we have started + * all the I/O from calling xfs_buf_ioend too early. + */ + atomic_set(&bp->b_io_remaining, 1); + _xfs_buf_ioapply(bp); + _xfs_buf_ioend(bp, 0); + + xfs_buf_rele(bp); + return 0; +} + +/* + * Waits for I/O to complete on the buffer supplied. + * It returns immediately if no I/O is pending. + * It returns the I/O error code, if any, or 0 if there was no error. + */ +int +xfs_buf_iowait( + xfs_buf_t *bp) +{ + trace_xfs_buf_iowait(bp, _RET_IP_); + + wait_for_completion(&bp->b_iowait); + + trace_xfs_buf_iowait_done(bp, _RET_IP_); + return bp->b_error; +} + +xfs_caddr_t +xfs_buf_offset( + xfs_buf_t *bp, + size_t offset) +{ + struct page *page; + + if (bp->b_flags & XBF_MAPPED) + return bp->b_addr + offset; + + offset += bp->b_offset; + page = bp->b_pages[offset >> PAGE_SHIFT]; + return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1)); +} + +/* + * Move data into or out of a buffer. + */ +void +xfs_buf_iomove( + xfs_buf_t *bp, /* buffer to process */ + size_t boff, /* starting buffer offset */ + size_t bsize, /* length to copy */ + void *data, /* data address */ + xfs_buf_rw_t mode) /* read/write/zero flag */ +{ + size_t bend, cpoff, csize; + struct page *page; + + bend = boff + bsize; + while (boff < bend) { + page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)]; + cpoff = xfs_buf_poff(boff + bp->b_offset); + csize = min_t(size_t, + PAGE_SIZE-cpoff, bp->b_count_desired-boff); + + ASSERT(((csize + cpoff) <= PAGE_SIZE)); + + switch (mode) { + case XBRW_ZERO: + memset(page_address(page) + cpoff, 0, csize); + break; + case XBRW_READ: + memcpy(data, page_address(page) + cpoff, csize); + break; + case XBRW_WRITE: + memcpy(page_address(page) + cpoff, data, csize); + } + + boff += csize; + data += csize; + } +} + +/* + * Handling of buffer targets (buftargs). + */ + +/* + * Wait for any bufs with callbacks that have been submitted but have not yet + * returned. These buffers will have an elevated hold count, so wait on those + * while freeing all the buffers only held by the LRU. + */ +void +xfs_wait_buftarg( + struct xfs_buftarg *btp) +{ + struct xfs_buf *bp; + +restart: + spin_lock(&btp->bt_lru_lock); + while (!list_empty(&btp->bt_lru)) { + bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); + if (atomic_read(&bp->b_hold) > 1) { + spin_unlock(&btp->bt_lru_lock); + delay(100); + goto restart; + } + /* + * clear the LRU reference count so the bufer doesn't get + * ignored in xfs_buf_rele(). + */ + atomic_set(&bp->b_lru_ref, 0); + spin_unlock(&btp->bt_lru_lock); + xfs_buf_rele(bp); + spin_lock(&btp->bt_lru_lock); + } + spin_unlock(&btp->bt_lru_lock); +} + +int +xfs_buftarg_shrink( + struct shrinker *shrink, + struct shrink_control *sc) +{ + struct xfs_buftarg *btp = container_of(shrink, + struct xfs_buftarg, bt_shrinker); + struct xfs_buf *bp; + int nr_to_scan = sc->nr_to_scan; + LIST_HEAD(dispose); + + if (!nr_to_scan) + return btp->bt_lru_nr; + + spin_lock(&btp->bt_lru_lock); + while (!list_empty(&btp->bt_lru)) { + if (nr_to_scan-- <= 0) + break; + + bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); + + /* + * Decrement the b_lru_ref count unless the value is already + * zero. 
If the value is already zero, we need to reclaim the + * buffer, otherwise it gets another trip through the LRU. + */ + if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { + list_move_tail(&bp->b_lru, &btp->bt_lru); + continue; + } + + /* + * remove the buffer from the LRU now to avoid needing another + * lock round trip inside xfs_buf_rele(). + */ + list_move(&bp->b_lru, &dispose); + btp->bt_lru_nr--; + } + spin_unlock(&btp->bt_lru_lock); + + while (!list_empty(&dispose)) { + bp = list_first_entry(&dispose, struct xfs_buf, b_lru); + list_del_init(&bp->b_lru); + xfs_buf_rele(bp); + } + + return btp->bt_lru_nr; +} + +void +xfs_free_buftarg( + struct xfs_mount *mp, + struct xfs_buftarg *btp) +{ + unregister_shrinker(&btp->bt_shrinker); + + xfs_flush_buftarg(btp, 1); + if (mp->m_flags & XFS_MOUNT_BARRIER) + xfs_blkdev_issue_flush(btp); + + kthread_stop(btp->bt_task); + kmem_free(btp); +} + +STATIC int +xfs_setsize_buftarg_flags( + xfs_buftarg_t *btp, + unsigned int blocksize, + unsigned int sectorsize, + int verbose) +{ + btp->bt_bsize = blocksize; + btp->bt_sshift = ffs(sectorsize) - 1; + btp->bt_smask = sectorsize - 1; + + if (set_blocksize(btp->bt_bdev, sectorsize)) { + xfs_warn(btp->bt_mount, + "Cannot set_blocksize to %u on device %s\n", + sectorsize, xfs_buf_target_name(btp)); + return EINVAL; + } + + return 0; +} + +/* + * When allocating the initial buffer target we have not yet + * read in the superblock, so don't know what sized sectors + * are being used is at this early stage. Play safe. + */ +STATIC int +xfs_setsize_buftarg_early( + xfs_buftarg_t *btp, + struct block_device *bdev) +{ + return xfs_setsize_buftarg_flags(btp, + PAGE_SIZE, bdev_logical_block_size(bdev), 0); +} + +int +xfs_setsize_buftarg( + xfs_buftarg_t *btp, + unsigned int blocksize, + unsigned int sectorsize) +{ + return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1); +} + +STATIC int +xfs_alloc_delwrite_queue( + xfs_buftarg_t *btp, + const char *fsname) +{ + INIT_LIST_HEAD(&btp->bt_delwrite_queue); + spin_lock_init(&btp->bt_delwrite_lock); + btp->bt_flags = 0; + btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname); + if (IS_ERR(btp->bt_task)) + return PTR_ERR(btp->bt_task); + return 0; +} + +xfs_buftarg_t * +xfs_alloc_buftarg( + struct xfs_mount *mp, + struct block_device *bdev, + int external, + const char *fsname) +{ + xfs_buftarg_t *btp; + + btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); + + btp->bt_mount = mp; + btp->bt_dev = bdev->bd_dev; + btp->bt_bdev = bdev; + btp->bt_bdi = blk_get_backing_dev_info(bdev); + if (!btp->bt_bdi) + goto error; + + INIT_LIST_HEAD(&btp->bt_lru); + spin_lock_init(&btp->bt_lru_lock); + if (xfs_setsize_buftarg_early(btp, bdev)) + goto error; + if (xfs_alloc_delwrite_queue(btp, fsname)) + goto error; + btp->bt_shrinker.shrink = xfs_buftarg_shrink; + btp->bt_shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&btp->bt_shrinker); + return btp; + +error: + kmem_free(btp); + return NULL; +} + + +/* + * Delayed write buffer handling + */ +STATIC void +xfs_buf_delwri_queue( + xfs_buf_t *bp, + int unlock) +{ + struct list_head *dwq = &bp->b_target->bt_delwrite_queue; + spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; + + trace_xfs_buf_delwri_queue(bp, _RET_IP_); + + ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC)); + + spin_lock(dwlk); + /* If already in the queue, dequeue and place at tail */ + if (!list_empty(&bp->b_list)) { + ASSERT(bp->b_flags & _XBF_DELWRI_Q); + if (unlock) + atomic_dec(&bp->b_hold); + list_del(&bp->b_list); + } + + if 
(list_empty(dwq)) { + /* start xfsbufd as it is about to have something to do */ + wake_up_process(bp->b_target->bt_task); + } + + bp->b_flags |= _XBF_DELWRI_Q; + list_add_tail(&bp->b_list, dwq); + bp->b_queuetime = jiffies; + spin_unlock(dwlk); + + if (unlock) + xfs_buf_unlock(bp); +} + +void +xfs_buf_delwri_dequeue( + xfs_buf_t *bp) +{ + spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock; + int dequeued = 0; + + spin_lock(dwlk); + if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) { + ASSERT(bp->b_flags & _XBF_DELWRI_Q); + list_del_init(&bp->b_list); + dequeued = 1; + } + bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q); + spin_unlock(dwlk); + + if (dequeued) + xfs_buf_rele(bp); + + trace_xfs_buf_delwri_dequeue(bp, _RET_IP_); +} + +/* + * If a delwri buffer needs to be pushed before it has aged out, then promote + * it to the head of the delwri queue so that it will be flushed on the next + * xfsbufd run. We do this by resetting the queuetime of the buffer to be older + * than the age currently needed to flush the buffer. Hence the next time the + * xfsbufd sees it is guaranteed to be considered old enough to flush. + */ +void +xfs_buf_delwri_promote( + struct xfs_buf *bp) +{ + struct xfs_buftarg *btp = bp->b_target; + long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1; + + ASSERT(bp->b_flags & XBF_DELWRI); + ASSERT(bp->b_flags & _XBF_DELWRI_Q); + + /* + * Check the buffer age before locking the delayed write queue as we + * don't need to promote buffers that are already past the flush age. + */ + if (bp->b_queuetime < jiffies - age) + return; + bp->b_queuetime = jiffies - age; + spin_lock(&btp->bt_delwrite_lock); + list_move(&bp->b_list, &btp->bt_delwrite_queue); + spin_unlock(&btp->bt_delwrite_lock); +} + +STATIC void +xfs_buf_runall_queues( + struct workqueue_struct *queue) +{ + flush_workqueue(queue); +} + +/* + * Move as many buffers as specified to the supplied list, + * indicating if we skipped any buffers to prevent deadlocks.
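+ * + * A sketch of the contract: unpinned buffers whose trylock succeeds + * move onto the supplied list with XBF_WRITE set and the delwri flags + * cleared; pinned or already-locked buffers are only counted, so the + * returned skip count tells callers such as xfs_flush_buftarg() that a + * retry is needed instead of blocking (and possibly deadlocking) here.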
+ */ +STATIC int +xfs_buf_delwri_split( + xfs_buftarg_t *target, + struct list_head *list, + unsigned long age) +{ + xfs_buf_t *bp, *n; + struct list_head *dwq = &target->bt_delwrite_queue; + spinlock_t *dwlk = &target->bt_delwrite_lock; + int skipped = 0; + int force; + + force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags); + INIT_LIST_HEAD(list); + spin_lock(dwlk); + list_for_each_entry_safe(bp, n, dwq, b_list) { + ASSERT(bp->b_flags & XBF_DELWRI); + + if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) { + if (!force && + time_before(jiffies, bp->b_queuetime + age)) { + xfs_buf_unlock(bp); + break; + } + + bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q); + bp->b_flags |= XBF_WRITE; + list_move_tail(&bp->b_list, list); + trace_xfs_buf_delwri_split(bp, _RET_IP_); + } else + skipped++; + } + spin_unlock(dwlk); + + return skipped; + +} + +/* + * Compare function is more complex than it needs to be because + * the return value is only 32 bits and we are doing comparisons + * on 64 bit values + */ +static int +xfs_buf_cmp( + void *priv, + struct list_head *a, + struct list_head *b) +{ + struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); + struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); + xfs_daddr_t diff; + + diff = ap->b_bn - bp->b_bn; + if (diff < 0) + return -1; + if (diff > 0) + return 1; + return 0; +} + +STATIC int +xfsbufd( + void *data) +{ + xfs_buftarg_t *target = (xfs_buftarg_t *)data; + + current->flags |= PF_MEMALLOC; + + set_freezable(); + + do { + long age = xfs_buf_age_centisecs * msecs_to_jiffies(10); + long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10); + struct list_head tmp; + struct blk_plug plug; + + if (unlikely(freezing(current))) { + set_bit(XBT_FORCE_SLEEP, &target->bt_flags); + refrigerator(); + } else { + clear_bit(XBT_FORCE_SLEEP, &target->bt_flags); + } + + /* sleep for a long time if there is nothing to do. */ + if (list_empty(&target->bt_delwrite_queue)) + tout = MAX_SCHEDULE_TIMEOUT; + schedule_timeout_interruptible(tout); + + xfs_buf_delwri_split(target, &tmp, age); + list_sort(NULL, &tmp, xfs_buf_cmp); + + blk_start_plug(&plug); + while (!list_empty(&tmp)) { + struct xfs_buf *bp; + bp = list_first_entry(&tmp, struct xfs_buf, b_list); + list_del_init(&bp->b_list); + xfs_bdstrat_cb(bp); + } + blk_finish_plug(&plug); + } while (!kthread_should_stop()); + + return 0; +} + +/* + * Go through all incore buffers, and release buffers if they belong to + * the given device. This is used in filesystem error handling to + * preserve the consistency of its metadata. + */ +int +xfs_flush_buftarg( + xfs_buftarg_t *target, + int wait) +{ + xfs_buf_t *bp; + int pincount = 0; + LIST_HEAD(tmp_list); + LIST_HEAD(wait_list); + struct blk_plug plug; + + xfs_buf_runall_queues(xfsconvertd_workqueue); + xfs_buf_runall_queues(xfsdatad_workqueue); + xfs_buf_runall_queues(xfslogd_workqueue); + + set_bit(XBT_FORCE_FLUSH, &target->bt_flags); + pincount = xfs_buf_delwri_split(target, &tmp_list, 0); + + /* + * Dropped the delayed write list lock, now walk the temporary list. + * All I/O is issued async and then if we need to wait for completion + * we do that after issuing all the IO. 
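+ * Sorting by disk address first and issuing everything inside one + * blk_start_plug()/blk_finish_plug() pair lets the block layer merge + * adjacent buffers into bigger requests before they hit the device.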
+ */ + list_sort(NULL, &tmp_list, xfs_buf_cmp); + + blk_start_plug(&plug); + while (!list_empty(&tmp_list)) { + bp = list_first_entry(&tmp_list, struct xfs_buf, b_list); + ASSERT(target == bp->b_target); + list_del_init(&bp->b_list); + if (wait) { + bp->b_flags &= ~XBF_ASYNC; + list_add(&bp->b_list, &wait_list); + } + xfs_bdstrat_cb(bp); + } + blk_finish_plug(&plug); + + if (wait) { + /* Wait for IO to complete. */ + while (!list_empty(&wait_list)) { + bp = list_first_entry(&wait_list, struct xfs_buf, b_list); + + list_del_init(&bp->b_list); + xfs_buf_iowait(bp); + xfs_buf_relse(bp); + } + } + + return pincount; +} + +int __init +xfs_buf_init(void) +{ + xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", + KM_ZONE_HWALIGN, NULL); + if (!xfs_buf_zone) + goto out; + + xfslogd_workqueue = alloc_workqueue("xfslogd", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 1); + if (!xfslogd_workqueue) + goto out_free_buf_zone; + + xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1); + if (!xfsdatad_workqueue) + goto out_destroy_xfslogd_workqueue; + + xfsconvertd_workqueue = alloc_workqueue("xfsconvertd", + WQ_MEM_RECLAIM, 1); + if (!xfsconvertd_workqueue) + goto out_destroy_xfsdatad_workqueue; + + return 0; + + out_destroy_xfsdatad_workqueue: + destroy_workqueue(xfsdatad_workqueue); + out_destroy_xfslogd_workqueue: + destroy_workqueue(xfslogd_workqueue); + out_free_buf_zone: + kmem_zone_destroy(xfs_buf_zone); + out: + return -ENOMEM; +} + +void +xfs_buf_terminate(void) +{ + destroy_workqueue(xfsconvertd_workqueue); + destroy_workqueue(xfsdatad_workqueue); + destroy_workqueue(xfslogd_workqueue); + kmem_zone_destroy(xfs_buf_zone); +} + +#ifdef CONFIG_KDB_MODULES +struct list_head * +xfs_get_buftarg_list(void) +{ + return &xfs_buftarg_list; +} +#endif diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h new file mode 100644 index 0000000..620972b --- /dev/null +++ b/fs/xfs/xfs_buf.h @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_BUF_H__ +#define __XFS_BUF_H__ + +#include <linux/list.h> +#include <linux/types.h> +#include <linux/spinlock.h> +#include <asm/system.h> +#include <linux/mm.h> +#include <linux/fs.h> +#include <linux/buffer_head.h> +#include <linux/uio.h> + +/* + * Base types + */ + +#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) + +#define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) +#define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) +#define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) +#define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) + +typedef enum { + XBRW_READ = 1, /* transfer into target memory */ + XBRW_WRITE = 2, /* transfer from target memory */ + XBRW_ZERO = 3, /* Zero target memory */ +} xfs_buf_rw_t; + +#define XBF_READ (1 << 0) /* buffer intended for reading from device */ +#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ +#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */ +#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */ +#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ +#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ +#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ +#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ + +/* I/O hints for the BIO layer */ +#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ +#define XBF_FUA (1 << 11)/* force cache write through mode */ +#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */ + +/* flags used only as arguments to access routines */ +#define XBF_LOCK (1 << 15)/* lock requested */ +#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ +#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */ + +/* flags used only internally */ +#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ +#define _XBF_KMEM (1 << 21)/* backed by heap memory */ +#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */ + +typedef unsigned int xfs_buf_flags_t; + +#define XFS_BUF_FLAGS \ + { XBF_READ, "READ" }, \ + { XBF_WRITE, "WRITE" }, \ + { XBF_READ_AHEAD, "READ_AHEAD" }, \ + { XBF_MAPPED, "MAPPED" }, \ + { XBF_ASYNC, "ASYNC" }, \ + { XBF_DONE, "DONE" }, \ + { XBF_DELWRI, "DELWRI" }, \ + { XBF_STALE, "STALE" }, \ + { XBF_SYNCIO, "SYNCIO" }, \ + { XBF_FUA, "FUA" }, \ + { XBF_FLUSH, "FLUSH" }, \ + { XBF_LOCK, "LOCK" }, /* should never be set */\ + { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ + { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ + { _XBF_PAGES, "PAGES" }, \ + { _XBF_KMEM, "KMEM" }, \ + { _XBF_DELWRI_Q, "DELWRI_Q" } + +typedef enum { + XBT_FORCE_SLEEP = 0, + XBT_FORCE_FLUSH = 1, +} xfs_buftarg_flags_t; + +typedef struct xfs_buftarg { + dev_t bt_dev; + struct block_device *bt_bdev; + struct backing_dev_info *bt_bdi; + struct xfs_mount *bt_mount; + unsigned int bt_bsize; + unsigned int bt_sshift; + size_t bt_smask; + + /* per device delwri queue */ + struct task_struct *bt_task; + struct list_head bt_delwrite_queue; + spinlock_t bt_delwrite_lock; + unsigned long bt_flags; + + /* LRU control structures */ + struct shrinker bt_shrinker; + struct list_head bt_lru; + spinlock_t bt_lru_lock; + unsigned int bt_lru_nr; +} xfs_buftarg_t; + +struct xfs_buf; +typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); + +#define XB_PAGES 2 + +typedef struct xfs_buf { + /* + * first cacheline holds all the fields needed for an uncontended cache + * hit to be fully processed.
The semaphore straddles the cacheline + * boundary, but the counter and lock sits on the first cacheline, + * which is the only bit that is touched if we hit the semaphore + * fast-path on locking. + */ + struct rb_node b_rbnode; /* rbtree node */ + xfs_off_t b_file_offset; /* offset in file */ + size_t b_buffer_length;/* size of buffer in bytes */ + atomic_t b_hold; /* reference count */ + atomic_t b_lru_ref; /* lru reclaim ref count */ + xfs_buf_flags_t b_flags; /* status flags */ + struct semaphore b_sema; /* semaphore for lockables */ + + struct list_head b_lru; /* lru list */ + wait_queue_head_t b_waiters; /* unpin waiters */ + struct list_head b_list; + struct xfs_perag *b_pag; /* contains rbtree root */ + xfs_buftarg_t *b_target; /* buffer target (device) */ + xfs_daddr_t b_bn; /* block number for I/O */ + size_t b_count_desired;/* desired transfer size */ + void *b_addr; /* virtual address of buffer */ + struct work_struct b_iodone_work; + xfs_buf_iodone_t b_iodone; /* I/O completion function */ + struct completion b_iowait; /* queue for I/O waiters */ + void *b_fspriv; + struct xfs_trans *b_transp; + struct page **b_pages; /* array of page pointers */ + struct page *b_page_array[XB_PAGES]; /* inline pages */ + unsigned long b_queuetime; /* time buffer was queued */ + atomic_t b_pin_count; /* pin count */ + atomic_t b_io_remaining; /* #outstanding I/O requests */ + unsigned int b_page_count; /* size of page array */ + unsigned int b_offset; /* page offset in first page */ + unsigned short b_error; /* error code on I/O */ +#ifdef XFS_BUF_LOCK_TRACKING + int b_last_holder; +#endif +} xfs_buf_t; + + +/* Finding and Reading Buffers */ +extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t, + xfs_buf_flags_t, xfs_buf_t *); +#define xfs_incore(buftarg,blkno,len,lockit) \ + _xfs_buf_find(buftarg, blkno ,len, lockit, NULL) + +extern xfs_buf_t *xfs_buf_get(xfs_buftarg_t *, xfs_off_t, size_t, + xfs_buf_flags_t); +extern xfs_buf_t *xfs_buf_read(xfs_buftarg_t *, xfs_off_t, size_t, + xfs_buf_flags_t); + +extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *); +extern void xfs_buf_set_empty(struct xfs_buf *bp, size_t len); +extern xfs_buf_t *xfs_buf_get_uncached(struct xfs_buftarg *, size_t, int); +extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t); +extern void xfs_buf_hold(xfs_buf_t *); +extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t); +struct xfs_buf *xfs_buf_read_uncached(struct xfs_mount *mp, + struct xfs_buftarg *target, + xfs_daddr_t daddr, size_t length, int flags); + +/* Releasing Buffers */ +extern void xfs_buf_free(xfs_buf_t *); +extern void xfs_buf_rele(xfs_buf_t *); + +/* Locking and Unlocking Buffers */ +extern int xfs_buf_trylock(xfs_buf_t *); +extern void xfs_buf_lock(xfs_buf_t *); +extern void xfs_buf_unlock(xfs_buf_t *); +#define xfs_buf_islocked(bp) \ + ((bp)->b_sema.count <= 0) + +/* Buffer Read and Write Routines */ +extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp); +extern void xfs_bdwrite(void *mp, xfs_buf_t *bp); + +extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *); +extern int xfs_bdstrat_cb(struct xfs_buf *); + +extern void xfs_buf_ioend(xfs_buf_t *, int); +extern void xfs_buf_ioerror(xfs_buf_t *, int); +extern int xfs_buf_iorequest(xfs_buf_t *); +extern int xfs_buf_iowait(xfs_buf_t *); +extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, + xfs_buf_rw_t); +#define xfs_buf_zero(bp, off, len) \ + xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO) + +static inline int 
xfs_buf_geterror(xfs_buf_t *bp) +{ + return bp ? bp->b_error : ENOMEM; +} + +/* Buffer Utility Routines */ +extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t); + +/* Delayed Write Buffer Routines */ +extern void xfs_buf_delwri_dequeue(xfs_buf_t *); +extern void xfs_buf_delwri_promote(xfs_buf_t *); + +/* Buffer Daemon Setup Routines */ +extern int xfs_buf_init(void); +extern void xfs_buf_terminate(void); + +static inline const char * +xfs_buf_target_name(struct xfs_buftarg *target) +{ + static char __b[BDEVNAME_SIZE]; + + return bdevname(target->bt_bdev, __b); +} + + +#define XFS_BUF_ZEROFLAGS(bp) \ + ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ + XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) + +void xfs_buf_stale(struct xfs_buf *bp); +#define XFS_BUF_STALE(bp) xfs_buf_stale(bp); +#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) +#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) +#define XFS_BUF_SUPER_STALE(bp) do { \ + XFS_BUF_STALE(bp); \ + xfs_buf_delwri_dequeue(bp); \ + XFS_BUF_DONE(bp); \ + } while (0) + +#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI) +#define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp) +#define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI) + +#define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE) +#define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE) +#define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE) + +#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC) +#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) +#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) + +#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) +#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) +#define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ) + +#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE) +#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) +#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) + +#define XFS_BUF_ADDR(bp) ((bp)->b_bn) +#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno)) +#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset) +#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off)) +#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired) +#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt)) +#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length) +#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt)) + +static inline void +xfs_buf_set_ref( + struct xfs_buf *bp, + int lru_ref) +{ + atomic_set(&bp->b_lru_ref, lru_ref); +} +#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref) +#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) + +static inline int xfs_buf_ispinned(struct xfs_buf *bp) +{ + return atomic_read(&bp->b_pin_count); +} + +#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); + +static inline void xfs_buf_relse(xfs_buf_t *bp) +{ + xfs_buf_unlock(bp); + xfs_buf_rele(bp); +} + +/* + * Handling of buftargs. 
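+ * + * A typical lifecycle, sketched (error handling omitted): mount calls + * xfs_alloc_buftarg(), re-sizes it via xfs_setsize_buftarg() once the + * superblock has been read, and unmount pairs xfs_flush_buftarg() with + * xfs_free_buftarg().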
+ */ +extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *, + struct block_device *, int, const char *); +extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *); +extern void xfs_wait_buftarg(xfs_buftarg_t *); +extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); +extern int xfs_flush_buftarg(xfs_buftarg_t *, int); + +#ifdef CONFIG_KDB_MODULES +extern struct list_head *xfs_get_buftarg_list(void); +#endif + +#define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev) +#define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev) + +#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1) +#define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1) + +#endif /* __XFS_BUF_H__ */ diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c new file mode 100644 index 0000000..244e797 --- /dev/null +++ b/fs/xfs/xfs_discard.c @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2010 Red Hat, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_sb.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_ag.h" +#include "xfs_mount.h" +#include "xfs_quota.h" +#include "xfs_trans.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_error.h" +#include "xfs_discard.h" +#include "xfs_trace.h" + +STATIC int +xfs_trim_extents( + struct xfs_mount *mp, + xfs_agnumber_t agno, + xfs_fsblock_t start, + xfs_fsblock_t len, + xfs_fsblock_t minlen, + __uint64_t *blocks_trimmed) +{ + struct block_device *bdev = mp->m_ddev_targp->bt_bdev; + struct xfs_btree_cur *cur; + struct xfs_buf *agbp; + struct xfs_perag *pag; + int error; + int i; + + pag = xfs_perag_get(mp, agno); + + error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); + if (error || !agbp) + goto out_put_perag; + + cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); + + /* + * Force out the log. This means any transactions that might have freed + * space before we took the AGF buffer lock are now on disk, and the + * volatile disk cache is flushed. + */ + xfs_log_force(mp, XFS_LOG_SYNC); + + /* + * Look up the longest btree in the AGF and start with it. + */ + error = xfs_alloc_lookup_le(cur, 0, + XFS_BUF_TO_AGF(agbp)->agf_longest, &i); + if (error) + goto out_del_cursor; + + /* + * Loop until we are done with all extents that are large + * enough to be worth discarding. + */ + while (i) { + xfs_agblock_t fbno; + xfs_extlen_t flen; + + error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); + if (error) + goto out_del_cursor; + XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); + ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest); + + /* + * Too small? Give up. 
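+ * (Give up rather than skip: the cursor walks the by-size btree from + * the longest extent downwards, so once one extent is below minlen + * every later one must be too.)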
+ */ + if (flen < minlen) { + trace_xfs_discard_toosmall(mp, agno, fbno, flen); + goto out_del_cursor; + } + + /* + * If the extent is entirely outside of the range we are + * supposed to discard skip it. Do not bother to trim + * down partially overlapping ranges for now. + */ + if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start || + XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) { + trace_xfs_discard_exclude(mp, agno, fbno, flen); + goto next_extent; + } + + /* + * If any blocks in the range are still busy, skip the + * discard and try again the next time. + */ + if (xfs_alloc_busy_search(mp, agno, fbno, flen)) { + trace_xfs_discard_busy(mp, agno, fbno, flen); + goto next_extent; + } + + trace_xfs_discard_extent(mp, agno, fbno, flen); + error = -blkdev_issue_discard(bdev, + XFS_AGB_TO_DADDR(mp, agno, fbno), + XFS_FSB_TO_BB(mp, flen), + GFP_NOFS, 0); + if (error) + goto out_del_cursor; + *blocks_trimmed += flen; + +next_extent: + error = xfs_btree_decrement(cur, 0, &i); + if (error) + goto out_del_cursor; + } + +out_del_cursor: + xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + xfs_buf_relse(agbp); +out_put_perag: + xfs_perag_put(pag); + return error; +} + +int +xfs_ioc_trim( + struct xfs_mount *mp, + struct fstrim_range __user *urange) +{ + struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; + unsigned int granularity = q->limits.discard_granularity; + struct fstrim_range range; + xfs_fsblock_t start, len, minlen; + xfs_agnumber_t start_agno, end_agno, agno; + __uint64_t blocks_trimmed = 0; + int error, last_error = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (!blk_queue_discard(q)) + return -XFS_ERROR(EOPNOTSUPP); + if (copy_from_user(&range, urange, sizeof(range))) + return -XFS_ERROR(EFAULT); + + /* + * Truncating down the len isn't actually quite correct, but using + * XFS_B_TO_FSB would mean we trivially get overflows for values + * of ULLONG_MAX or slightly lower. And ULLONG_MAX is the default + * used by the fstrim application. In the end it really doesn't + * matter as trimming blocks is an advisory interface. 
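+ * As a worked example with 4k blocks: XFS_B_TO_FSBT() just shifts the + * byte count right by the block shift, so even ULLONG_MAX truncates to + * a sane block count, while the round-up conversion would first add + * (blocksize - 1) and wrap to a tiny value.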
+ */ + start = XFS_B_TO_FSBT(mp, range.start); + len = XFS_B_TO_FSBT(mp, range.len); + minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen)); + + start_agno = XFS_FSB_TO_AGNO(mp, start); + if (start_agno >= mp->m_sb.sb_agcount) + return -XFS_ERROR(EINVAL); + + end_agno = XFS_FSB_TO_AGNO(mp, start + len); + if (end_agno >= mp->m_sb.sb_agcount) + end_agno = mp->m_sb.sb_agcount - 1; + + for (agno = start_agno; agno <= end_agno; agno++) { + error = -xfs_trim_extents(mp, agno, start, len, minlen, + &blocks_trimmed); + if (error) + last_error = error; + } + + if (last_error) + return last_error; + + range.len = XFS_FSB_TO_B(mp, blocks_trimmed); + if (copy_to_user(urange, &range, sizeof(range))) + return -XFS_ERROR(EFAULT); + return 0; +} + +int +xfs_discard_extents( + struct xfs_mount *mp, + struct list_head *list) +{ + struct xfs_busy_extent *busyp; + int error = 0; + + list_for_each_entry(busyp, list, list) { + trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, + busyp->length); + + error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, + XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), + XFS_FSB_TO_BB(mp, busyp->length), + GFP_NOFS, 0); + if (error && error != EOPNOTSUPP) { + xfs_info(mp, + "discard failed for extent [0x%llu,%u], error %d", + (unsigned long long)busyp->bno, + busyp->length, + error); + return error; + } + } + + return 0; +} diff --git a/fs/xfs/xfs_discard.h b/fs/xfs/xfs_discard.h new file mode 100644 index 0000000..344879a --- /dev/null +++ b/fs/xfs/xfs_discard.h @@ -0,0 +1,10 @@ +#ifndef XFS_DISCARD_H +#define XFS_DISCARD_H 1 + +struct fstrim_range; +struct list_head; + +extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); +extern int xfs_discard_extents(struct xfs_mount *, struct list_head *); + +#endif /* XFS_DISCARD_H */ diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c new file mode 100644 index 0000000..db62959 --- /dev/null +++ b/fs/xfs/xfs_dquot.c @@ -0,0 +1,1454 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_trans_priv.h" +#include "xfs_qm.h" +#include "xfs_trace.h" + + +/* + LOCK ORDER + + inode lock (ilock) + dquot hash-chain lock (hashlock) + xqm dquot freelist lock (freelistlock + mount's dquot list lock (mplistlock) + user dquot lock - lock ordering among dquots is based on the uid or gid + group dquot lock - similar to udquots. 
Between the two dquots, the udquot + has to be locked first. + pin lock - the dquot lock must be held to take this lock. + flush lock - ditto. +*/ + +#ifdef DEBUG +xfs_buftarg_t *xfs_dqerror_target; +int xfs_do_dqerror; +int xfs_dqreq_num; +int xfs_dqerror_mod = 33; +#endif + +static struct lock_class_key xfs_dquot_other_class; + +/* + * Allocate and initialize a dquot. We don't always allocate fresh memory; + * we try to reclaim a free dquot if the number of incore dquots are above + * a threshold. + * The only field inside the core that gets initialized at this point + * is the d_id field. The idea is to fill in the entire q_core + * when we read in the on disk dquot. + */ +STATIC xfs_dquot_t * +xfs_qm_dqinit( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type) +{ + xfs_dquot_t *dqp; + boolean_t brandnewdquot; + + brandnewdquot = xfs_qm_dqalloc_incore(&dqp); + dqp->dq_flags = type; + dqp->q_core.d_id = cpu_to_be32(id); + dqp->q_mount = mp; + + /* + * No need to re-initialize these if this is a reclaimed dquot. + */ + if (brandnewdquot) { + INIT_LIST_HEAD(&dqp->q_freelist); + mutex_init(&dqp->q_qlock); + init_waitqueue_head(&dqp->q_pinwait); + + /* + * Because we want to use a counting completion, complete + * the flush completion once to allow a single access to + * the flush completion without blocking. + */ + init_completion(&dqp->q_flush); + complete(&dqp->q_flush); + + trace_xfs_dqinit(dqp); + } else { + /* + * Only the q_core portion was zeroed in dqreclaim_one(). + * So, we need to reset others. + */ + dqp->q_nrefs = 0; + dqp->q_blkno = 0; + INIT_LIST_HEAD(&dqp->q_mplist); + INIT_LIST_HEAD(&dqp->q_hashlist); + dqp->q_bufoffset = 0; + dqp->q_fileoffset = 0; + dqp->q_transp = NULL; + dqp->q_gdquot = NULL; + dqp->q_res_bcount = 0; + dqp->q_res_icount = 0; + dqp->q_res_rtbcount = 0; + atomic_set(&dqp->q_pincount, 0); + dqp->q_hash = NULL; + ASSERT(list_empty(&dqp->q_freelist)); + + trace_xfs_dqreuse(dqp); + } + + /* + * In either case we need to make sure group quotas have a different + * lock class than user quotas, to make sure lockdep knows we can + * locks of one of each at the same time. + */ + if (!(type & XFS_DQ_USER)) + lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class); + + /* + * log item gets initialized later + */ + return (dqp); +} + +/* + * This is called to free all the memory associated with a dquot + */ +void +xfs_qm_dqdestroy( + xfs_dquot_t *dqp) +{ + ASSERT(list_empty(&dqp->q_freelist)); + + mutex_destroy(&dqp->q_qlock); + kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); + + atomic_dec(&xfs_Gqm->qm_totaldquots); +} + +/* + * This is what a 'fresh' dquot inside a dquot chunk looks like on disk. + */ +STATIC void +xfs_qm_dqinit_core( + xfs_dqid_t id, + uint type, + xfs_dqblk_t *d) +{ + /* + * Caller has zero'd the entire dquot 'chunk' already. + */ + d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); + d->dd_diskdq.d_version = XFS_DQUOT_VERSION; + d->dd_diskdq.d_id = cpu_to_be32(id); + d->dd_diskdq.d_flags = type; +} + +/* + * If default limits are in force, push them into the dquot now. + * We overwrite the dquot limits only if they are zero and this + * is not the root dquot. 
+ */ +void +xfs_qm_adjust_dqlimits( + xfs_mount_t *mp, + xfs_disk_dquot_t *d) +{ + xfs_quotainfo_t *q = mp->m_quotainfo; + + ASSERT(d->d_id); + + if (q->qi_bsoftlimit && !d->d_blk_softlimit) + d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit); + if (q->qi_bhardlimit && !d->d_blk_hardlimit) + d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit); + if (q->qi_isoftlimit && !d->d_ino_softlimit) + d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit); + if (q->qi_ihardlimit && !d->d_ino_hardlimit) + d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit); + if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit) + d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit); + if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit) + d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit); +} + +/* + * Check the limits and timers of a dquot and start or reset timers + * if necessary. + * This gets called even when quota enforcement is OFF, which makes our + * life a little less complicated. (We just don't reject any quota + * reservations in that case, when enforcement is off). + * We also return 0 as the values of the timers in Q_GETQUOTA calls, when + * enforcement's off. + * In contrast, warnings are a little different in that they don't + * 'automatically' get started when limits get exceeded. They do + * get reset to zero, however, when we find the count to be under + * the soft limit (they are only ever set non-zero via userspace). + */ +void +xfs_qm_adjust_dqtimers( + xfs_mount_t *mp, + xfs_disk_dquot_t *d) +{ + ASSERT(d->d_id); + +#ifdef DEBUG + if (d->d_blk_hardlimit) + ASSERT(be64_to_cpu(d->d_blk_softlimit) <= + be64_to_cpu(d->d_blk_hardlimit)); + if (d->d_ino_hardlimit) + ASSERT(be64_to_cpu(d->d_ino_softlimit) <= + be64_to_cpu(d->d_ino_hardlimit)); + if (d->d_rtb_hardlimit) + ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= + be64_to_cpu(d->d_rtb_hardlimit)); +#endif + + if (!d->d_btimer) { + if ((d->d_blk_softlimit && + (be64_to_cpu(d->d_bcount) >= + be64_to_cpu(d->d_blk_softlimit))) || + (d->d_blk_hardlimit && + (be64_to_cpu(d->d_bcount) >= + be64_to_cpu(d->d_blk_hardlimit)))) { + d->d_btimer = cpu_to_be32(get_seconds() + + mp->m_quotainfo->qi_btimelimit); + } else { + d->d_bwarns = 0; + } + } else { + if ((!d->d_blk_softlimit || + (be64_to_cpu(d->d_bcount) < + be64_to_cpu(d->d_blk_softlimit))) && + (!d->d_blk_hardlimit || + (be64_to_cpu(d->d_bcount) < + be64_to_cpu(d->d_blk_hardlimit)))) { + d->d_btimer = 0; + } + } + + if (!d->d_itimer) { + if ((d->d_ino_softlimit && + (be64_to_cpu(d->d_icount) >= + be64_to_cpu(d->d_ino_softlimit))) || + (d->d_ino_hardlimit && + (be64_to_cpu(d->d_icount) >= + be64_to_cpu(d->d_ino_hardlimit)))) { + d->d_itimer = cpu_to_be32(get_seconds() + + mp->m_quotainfo->qi_itimelimit); + } else { + d->d_iwarns = 0; + } + } else { + if ((!d->d_ino_softlimit || + (be64_to_cpu(d->d_icount) < + be64_to_cpu(d->d_ino_softlimit))) && + (!d->d_ino_hardlimit || + (be64_to_cpu(d->d_icount) < + be64_to_cpu(d->d_ino_hardlimit)))) { + d->d_itimer = 0; + } + } + + if (!d->d_rtbtimer) { + if ((d->d_rtb_softlimit && + (be64_to_cpu(d->d_rtbcount) >= + be64_to_cpu(d->d_rtb_softlimit))) || + (d->d_rtb_hardlimit && + (be64_to_cpu(d->d_rtbcount) >= + be64_to_cpu(d->d_rtb_hardlimit)))) { + d->d_rtbtimer = cpu_to_be32(get_seconds() + + mp->m_quotainfo->qi_rtbtimelimit); + } else { + d->d_rtbwarns = 0; + } + } else { + if ((!d->d_rtb_softlimit || + (be64_to_cpu(d->d_rtbcount) < + be64_to_cpu(d->d_rtb_softlimit))) && + (!d->d_rtb_hardlimit || + (be64_to_cpu(d->d_rtbcount) < + be64_to_cpu(d->d_rtb_hardlimit)))) { + d->d_rtbtimer 
= 0; + } + } +} + +/* + * initialize a buffer full of dquots and log the whole thing + */ +STATIC void +xfs_qm_init_dquot_blk( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + xfs_buf_t *bp) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + xfs_dqblk_t *d; + int curid, i; + + ASSERT(tp); + ASSERT(xfs_buf_islocked(bp)); + + d = bp->b_addr; + + /* + * ID of the first dquot in the block - id's are zero based. + */ + curid = id - (id % q->qi_dqperchunk); + ASSERT(curid >= 0); + memset(d, 0, BBTOB(q->qi_dqchunklen)); + for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) + xfs_qm_dqinit_core(curid, type, d); + xfs_trans_dquot_buf(tp, bp, + (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF : + ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF : + XFS_BLF_GDQUOT_BUF))); + xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1); +} + + + +/* + * Allocate a block and fill it with dquots. + * This is called when the bmapi finds a hole. + */ +STATIC int +xfs_qm_dqalloc( + xfs_trans_t **tpp, + xfs_mount_t *mp, + xfs_dquot_t *dqp, + xfs_inode_t *quotip, + xfs_fileoff_t offset_fsb, + xfs_buf_t **O_bpp) +{ + xfs_fsblock_t firstblock; + xfs_bmap_free_t flist; + xfs_bmbt_irec_t map; + int nmaps, error, committed; + xfs_buf_t *bp; + xfs_trans_t *tp = *tpp; + + ASSERT(tp != NULL); + + trace_xfs_dqalloc(dqp); + + /* + * Initialize the bmap freelist prior to calling bmapi code. + */ + xfs_bmap_init(&flist, &firstblock); + xfs_ilock(quotip, XFS_ILOCK_EXCL); + /* + * Return if this type of quotas is turned off while we didn't + * have an inode lock + */ + if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + xfs_iunlock(quotip, XFS_ILOCK_EXCL); + return (ESRCH); + } + + xfs_trans_ijoin_ref(tp, quotip, XFS_ILOCK_EXCL); + nmaps = 1; + if ((error = xfs_bmapi(tp, quotip, + offset_fsb, XFS_DQUOT_CLUSTER_SIZE_FSB, + XFS_BMAPI_METADATA | XFS_BMAPI_WRITE, + &firstblock, + XFS_QM_DQALLOC_SPACE_RES(mp), + &map, &nmaps, &flist))) { + goto error0; + } + ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); + ASSERT(nmaps == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + + /* + * Keep track of the blkno to save a lookup later + */ + dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); + + /* now we can just get the buffer (there's nothing to read yet) */ + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, + dqp->q_blkno, + mp->m_quotainfo->qi_dqchunklen, + 0); + if (!bp || (error = xfs_buf_geterror(bp))) + goto error1; + /* + * Make a chunk of dquots out of this buffer and log + * the entire thing. + */ + xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), + dqp->dq_flags & XFS_DQ_ALLTYPES, bp); + + /* + * xfs_bmap_finish() may commit the current transaction and + * start a second transaction if the freelist is not empty. + * + * Since we still want to modify this buffer, we need to + * ensure that the buffer is not released on commit of + * the first transaction and ensure the buffer is added to the + * second transaction. + * + * If there is only one transaction then don't stop the buffer + * from being released when it commits later on. + */ + + xfs_trans_bhold(tp, bp); + + if ((error = xfs_bmap_finish(tpp, &flist, &committed))) { + goto error1; + } + + if (committed) { + tp = *tpp; + xfs_trans_bjoin(tp, bp); + } else { + xfs_trans_bhold_release(tp, bp); + } + + *O_bpp = bp; + return 0; + + error1: + xfs_bmap_cancel(&flist); + error0: + xfs_iunlock(quotip, XFS_ILOCK_EXCL); + + return (error); +} + +/* + * Maps a dquot to the buffer containing its on-disk version. 
+ * This returns a ptr to the buffer containing the on-disk dquot + * in the bpp param, and a ptr to the on-disk dquot within that buffer + */ +STATIC int +xfs_qm_dqtobp( + xfs_trans_t **tpp, + xfs_dquot_t *dqp, + xfs_disk_dquot_t **O_ddpp, + xfs_buf_t **O_bpp, + uint flags) +{ + xfs_bmbt_irec_t map; + int nmaps = 1, error; + xfs_buf_t *bp; + xfs_inode_t *quotip = XFS_DQ_TO_QIP(dqp); + xfs_mount_t *mp = dqp->q_mount; + xfs_disk_dquot_t *ddq; + xfs_dqid_t id = be32_to_cpu(dqp->q_core.d_id); + xfs_trans_t *tp = (tpp ? *tpp : NULL); + + dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk; + + xfs_ilock(quotip, XFS_ILOCK_SHARED); + if (XFS_IS_THIS_QUOTA_OFF(dqp)) { + /* + * Return if this type of quotas is turned off while we + * didn't have the quota inode lock. + */ + xfs_iunlock(quotip, XFS_ILOCK_SHARED); + return ESRCH; + } + + /* + * Find the block map; no allocations yet + */ + error = xfs_bmapi(NULL, quotip, dqp->q_fileoffset, + XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA, + NULL, 0, &map, &nmaps, NULL); + + xfs_iunlock(quotip, XFS_ILOCK_SHARED); + if (error) + return error; + + ASSERT(nmaps == 1); + ASSERT(map.br_blockcount == 1); + + /* + * Offset of dquot in the (fixed sized) dquot chunk. + */ + dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) * + sizeof(xfs_dqblk_t); + + ASSERT(map.br_startblock != DELAYSTARTBLOCK); + if (map.br_startblock == HOLESTARTBLOCK) { + /* + * We don't allocate unless we're asked to + */ + if (!(flags & XFS_QMOPT_DQALLOC)) + return ENOENT; + + ASSERT(tp); + error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, + dqp->q_fileoffset, &bp); + if (error) + return error; + tp = *tpp; + } else { + trace_xfs_dqtobp_read(dqp); + + /* + * store the blkno etc so that we don't have to do the + * mapping all the time + */ + dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); + + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, + dqp->q_blkno, + mp->m_quotainfo->qi_dqchunklen, + 0, &bp); + if (error || !bp) + return XFS_ERROR(error); + } + + ASSERT(xfs_buf_islocked(bp)); + + /* + * calculate the location of the dquot inside the buffer. + */ + ddq = bp->b_addr + dqp->q_bufoffset; + + /* + * A simple sanity check in case we got a corrupted dquot... + */ + error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, + flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), + "dqtobp"); + if (error) { + if (!(flags & XFS_QMOPT_DQREPAIR)) { + xfs_trans_brelse(tp, bp); + return XFS_ERROR(EIO); + } + } + + *O_bpp = bp; + *O_ddpp = ddq; + + return (0); +} + + +/* + * Read in the ondisk dquot using dqtobp() then copy it to an incore version, + * and release the buffer immediately. + * + */ +/* ARGSUSED */ +STATIC int +xfs_qm_dqread( + xfs_trans_t **tpp, + xfs_dqid_t id, + xfs_dquot_t *dqp, /* dquot to get filled in */ + uint flags) +{ + xfs_disk_dquot_t *ddqp; + xfs_buf_t *bp; + int error; + xfs_trans_t *tp; + + ASSERT(tpp); + + trace_xfs_dqread(dqp); + + /* + * get a pointer to the on-disk dquot and the buffer containing it + * dqp already knows its own type (GROUP/USER). + */ + if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) { + return (error); + } + tp = *tpp; + + /* copy everything from disk dquot to the incore dquot */ + memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); + ASSERT(be32_to_cpu(dqp->q_core.d_id) == id); + xfs_qm_dquot_logitem_init(dqp); + + /* + * Reservation counters are defined as reservation plus current usage + * to avoid having to add every time. 
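+ * Illustrative numbers (the reservation side lives in the transaction + * code): with d_bcount of 100 blocks on disk, q_res_bcount starts at + * 100; a transaction reserving 8 more blocks bumps it to 108, so limit + * enforcement is a single compare rather than usage-plus-reservation + * arithmetic on every check.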
+ */ + dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount); + dqp->q_res_icount = be64_to_cpu(ddqp->d_icount); + dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount); + + /* Mark the buf so that this will stay incore a little longer */ + XFS_BUF_SET_VTYPE_REF(bp, B_FS_DQUOT, XFS_DQUOT_REF); + + /* + * We got the buffer with a xfs_trans_read_buf() (in dqtobp()) + * So we need to release with xfs_trans_brelse(). + * The strategy here is identical to that of inodes; we lock + * the dquot in xfs_qm_dqget() before making it accessible to + * others. This is because dquots, like inodes, need a good level of + * concurrency, and we don't want to take locks on the entire buffers + * for dquot accesses. + * Note also that the dquot buffer may even be dirty at this point, if + * this particular dquot was repaired. We still aren't afraid to + * brelse it because we have the changes incore. + */ + ASSERT(xfs_buf_islocked(bp)); + xfs_trans_brelse(tp, bp); + + return (error); +} + + +/* + * allocate an incore dquot from the kernel heap, + * and fill its core with quota information kept on disk. + * If XFS_QMOPT_DQALLOC is set, it'll allocate a dquot on disk + * if it wasn't already allocated. + */ +STATIC int +xfs_qm_idtodq( + xfs_mount_t *mp, + xfs_dqid_t id, /* gid or uid, depending on type */ + uint type, /* UDQUOT or GDQUOT */ + uint flags, /* DQALLOC, DQREPAIR */ + xfs_dquot_t **O_dqpp)/* OUT : incore dquot, not locked */ +{ + xfs_dquot_t *dqp; + int error; + xfs_trans_t *tp; + int cancelflags=0; + + dqp = xfs_qm_dqinit(mp, id, type); + tp = NULL; + if (flags & XFS_QMOPT_DQALLOC) { + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC); + error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp), + XFS_WRITE_LOG_RES(mp) + + BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + + 128, + 0, + XFS_TRANS_PERM_LOG_RES, + XFS_WRITE_LOG_COUNT); + if (error) { + cancelflags = 0; + goto error0; + } + cancelflags = XFS_TRANS_RELEASE_LOG_RES; + } + + /* + * Read it from disk; xfs_dqread() takes care of + * all the necessary initialization of dquot's fields (locks, etc) + */ + if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) { + /* + * This can happen if quotas got turned off (ESRCH), + * or if the dquot didn't exist on disk and we ask to + * allocate (ENOENT). + */ + trace_xfs_dqread_fail(dqp); + cancelflags |= XFS_TRANS_ABORT; + goto error0; + } + if (tp) { + if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) + goto error1; + } + + *O_dqpp = dqp; + return (0); + + error0: + ASSERT(error); + if (tp) + xfs_trans_cancel(tp, cancelflags); + error1: + xfs_qm_dqdestroy(dqp); + *O_dqpp = NULL; + return (error); +} + +/* + * Lookup a dquot in the incore dquot hashtable. We keep two separate + * hashtables for user and group dquots; and, these are global tables + * inside the XQM, not per-filesystem tables. + * The hash chain must be locked by caller, and it is left locked + * on return. Returning dquot is locked. + */ +STATIC int +xfs_qm_dqlookup( + xfs_mount_t *mp, + xfs_dqid_t id, + xfs_dqhash_t *qh, + xfs_dquot_t **O_dqpp) +{ + xfs_dquot_t *dqp; + uint flist_locked; + + ASSERT(mutex_is_locked(&qh->qh_lock)); + + flist_locked = B_FALSE; + + /* + * Traverse the hashchain looking for a match + */ + list_for_each_entry(dqp, &qh->qh_list, q_hashlist) { + /* + * We already have the hashlock. We don't need the + * dqlock to look at the id field of the dquot, since the + * id can't be modified without the hashlock anyway. 
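+ * (The id alone is not unique either; the hash tables are global to + * the XQM, so the match below also has to compare q_mount.)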
+ */ + if (be32_to_cpu(dqp->q_core.d_id) == id && dqp->q_mount == mp) { + trace_xfs_dqlookup_found(dqp); + + /* + * All in core dquots must be on the dqlist of mp + */ + ASSERT(!list_empty(&dqp->q_mplist)); + + xfs_dqlock(dqp); + if (dqp->q_nrefs == 0) { + ASSERT(!list_empty(&dqp->q_freelist)); + if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) { + trace_xfs_dqlookup_want(dqp); + + /* + * We may have raced with dqreclaim_one() + * (and lost). So, flag that we don't + * want the dquot to be reclaimed. + */ + dqp->dq_flags |= XFS_DQ_WANT; + xfs_dqunlock(dqp); + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); + xfs_dqlock(dqp); + dqp->dq_flags &= ~(XFS_DQ_WANT); + } + flist_locked = B_TRUE; + } + + /* + * id couldn't have changed; we had the hashlock all + * along + */ + ASSERT(be32_to_cpu(dqp->q_core.d_id) == id); + + if (flist_locked) { + if (dqp->q_nrefs != 0) { + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + flist_locked = B_FALSE; + } else { + /* take it off the freelist */ + trace_xfs_dqlookup_freelist(dqp); + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + } + } + + XFS_DQHOLD(dqp); + + if (flist_locked) + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + /* + * move the dquot to the front of the hashchain + */ + ASSERT(mutex_is_locked(&qh->qh_lock)); + list_move(&dqp->q_hashlist, &qh->qh_list); + trace_xfs_dqlookup_done(dqp); + *O_dqpp = dqp; + return 0; + } + } + + *O_dqpp = NULL; + ASSERT(mutex_is_locked(&qh->qh_lock)); + return (1); +} + +/* + * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a + * a locked dquot, doing an allocation (if requested) as needed. + * When both an inode and an id are given, the inode's id takes precedence. + * That is, if the id changes while we don't hold the ilock inside this + * function, the new dquot is returned, not necessarily the one requested + * in the id argument. + */ +int +xfs_qm_dqget( + xfs_mount_t *mp, + xfs_inode_t *ip, /* locked inode (optional) */ + xfs_dqid_t id, /* uid/projid/gid depending on type */ + uint type, /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */ + uint flags, /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */ + xfs_dquot_t **O_dqpp) /* OUT : locked incore dquot */ +{ + xfs_dquot_t *dqp; + xfs_dqhash_t *h; + uint version; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + if ((! XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || + (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) || + (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { + return (ESRCH); + } + h = XFS_DQ_HASH(mp, id, type); + +#ifdef DEBUG + if (xfs_do_dqerror) { + if ((xfs_dqerror_target == mp->m_ddev_targp) && + (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { + xfs_debug(mp, "Returning error in dqget"); + return (EIO); + } + } +#endif + + again: + +#ifdef DEBUG + ASSERT(type == XFS_DQ_USER || + type == XFS_DQ_PROJ || + type == XFS_DQ_GROUP); + if (ip) { + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + if (type == XFS_DQ_USER) + ASSERT(ip->i_udquot == NULL); + else + ASSERT(ip->i_gdquot == NULL); + } +#endif + mutex_lock(&h->qh_lock); + + /* + * Look in the cache (hashtable). + * The chain is kept locked during lookup. + */ + if (xfs_qm_dqlookup(mp, id, h, O_dqpp) == 0) { + XQM_STATS_INC(xqmstats.xs_qm_dqcachehits); + /* + * The dquot was found, moved to the front of the chain, + * taken off the freelist if it was on it, and locked + * at this point. Just unlock the hashchain and return. 
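+ * (Moving the dquot to the front of the chain on a hit is a simple + * MRU scheme; repeat lookups for hot dquots then walk only a few + * entries.)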
+		 */
+		ASSERT(*O_dqpp);
+		ASSERT(XFS_DQ_IS_LOCKED(*O_dqpp));
+		mutex_unlock(&h->qh_lock);
+		trace_xfs_dqget_hit(*O_dqpp);
+		return (0);	/* success */
+	}
+	XQM_STATS_INC(xqmstats.xs_qm_dqcachemisses);
+
+	/*
+	 * Dquot cache miss. We don't want to keep the inode lock across
+	 * a (potential) disk read. Also we don't want to deal with the lock
+	 * ordering between quotainode and this inode. OTOH, dropping the inode
+	 * lock here means dealing with a chown that can happen before
+	 * we re-acquire the lock.
+	 */
+	if (ip)
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	/*
+	 * Save the hashchain version stamp, and unlock the chain, so that
+	 * we don't keep the lock across a disk read
+	 */
+	version = h->qh_version;
+	mutex_unlock(&h->qh_lock);
+
+	/*
+	 * Allocate the dquot on the kernel heap, and read the ondisk
+	 * portion off the disk. Also, do all the necessary initialization.
+	 * This can return ENOENT if dquot didn't exist on disk and we didn't
+	 * ask it to allocate; ESRCH if quotas got turned off suddenly.
+	 */
+	if ((error = xfs_qm_idtodq(mp, id, type,
+				  flags & (XFS_QMOPT_DQALLOC|XFS_QMOPT_DQREPAIR|
+					   XFS_QMOPT_DOWARN),
+				  &dqp))) {
+		if (ip)
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+		return (error);
+	}
+
+	/*
+	 * See if this is mount code calling to look at the overall quota limits
+	 * which are stored in the id == 0 user or group's dquot.
+	 * Since we may not have done a quotacheck by this point, just return
+	 * the dquot without attaching it to any hashtables, lists, etc., or even
+	 * taking a reference.
+	 * The caller must dqdestroy this once done.
+	 */
+	if (flags & XFS_QMOPT_DQSUSER) {
+		ASSERT(id == 0);
+		ASSERT(! ip);
+		goto dqret;
+	}
+
+	/*
+	 * Dquot lock comes after hashlock in the lock ordering
+	 */
+	if (ip) {
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+		/*
+		 * A dquot could be attached to this inode by now, since
+		 * we had dropped the ilock.
+		 */
+		if (type == XFS_DQ_USER) {
+			if (!XFS_IS_UQUOTA_ON(mp)) {
+				/* inode stays locked on return */
+				xfs_qm_dqdestroy(dqp);
+				return XFS_ERROR(ESRCH);
+			}
+			if (ip->i_udquot) {
+				xfs_qm_dqdestroy(dqp);
+				dqp = ip->i_udquot;
+				xfs_dqlock(dqp);
+				goto dqret;
+			}
+		} else {
+			if (!XFS_IS_OQUOTA_ON(mp)) {
+				/* inode stays locked on return */
+				xfs_qm_dqdestroy(dqp);
+				return XFS_ERROR(ESRCH);
+			}
+			if (ip->i_gdquot) {
+				xfs_qm_dqdestroy(dqp);
+				dqp = ip->i_gdquot;
+				xfs_dqlock(dqp);
+				goto dqret;
+			}
+		}
+	}
+
+	/*
+	 * Hashlock comes after ilock in lock order
+	 */
+	mutex_lock(&h->qh_lock);
+	if (version != h->qh_version) {
+		xfs_dquot_t *tmpdqp;
+		/*
+		 * Now, see if somebody else put the dquot in the
+		 * hashtable before us. This can happen because we didn't
+		 * keep the hashchain lock. We don't have to worry about
+		 * lock order between the two dquots here since dqp isn't
+		 * on any findable lists yet.
+		 */
+		if (xfs_qm_dqlookup(mp, id, h, &tmpdqp) == 0) {
+			/*
+			 * Duplicate found. Just throw away the new dquot
+			 * and start over.
+			 */
+			xfs_qm_dqput(tmpdqp);
+			mutex_unlock(&h->qh_lock);
+			xfs_qm_dqdestroy(dqp);
+			XQM_STATS_INC(xqmstats.xs_qm_dquot_dups);
+			goto again;
+		}
+	}
+
+	/*
+	 * Put the dquot at the beginning of the hash-chain and mp's list
+	 * LOCK ORDER: hashlock, freelistlock, mplistlock, udqlock, gdqlock ..
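+	 *
+	 * A sketch of that nesting (illustrative; udqp/gdqp stand in for
+	 * the user and group dquots):
+	 *
+	 *	mutex_lock(&h->qh_lock);
+	 *	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
+	 *	mutex_lock(&mp->m_quotainfo->qi_dqlist_lock);
+	 *	xfs_dqlock(udqp);
+	 *	xfs_dqlock(gdqp);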
+ */ + ASSERT(mutex_is_locked(&h->qh_lock)); + dqp->q_hash = h; + list_add(&dqp->q_hashlist, &h->qh_list); + h->qh_version++; + + /* + * Attach this dquot to this filesystem's list of all dquots, + * kept inside the mount structure in m_quotainfo field + */ + mutex_lock(&mp->m_quotainfo->qi_dqlist_lock); + + /* + * We return a locked dquot to the caller, with a reference taken + */ + xfs_dqlock(dqp); + dqp->q_nrefs = 1; + + list_add(&dqp->q_mplist, &mp->m_quotainfo->qi_dqlist); + mp->m_quotainfo->qi_dquots++; + mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); + mutex_unlock(&h->qh_lock); + dqret: + ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); + trace_xfs_dqget_miss(dqp); + *O_dqpp = dqp; + return (0); +} + + +/* + * Release a reference to the dquot (decrement ref-count) + * and unlock it. If there is a group quota attached to this + * dquot, carefully release that too without tripping over + * deadlocks'n'stuff. + */ +void +xfs_qm_dqput( + xfs_dquot_t *dqp) +{ + xfs_dquot_t *gdqp; + + ASSERT(dqp->q_nrefs > 0); + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + + trace_xfs_dqput(dqp); + + if (dqp->q_nrefs != 1) { + dqp->q_nrefs--; + xfs_dqunlock(dqp); + return; + } + + /* + * drop the dqlock and acquire the freelist and dqlock + * in the right order; but try to get it out-of-order first + */ + if (!mutex_trylock(&xfs_Gqm->qm_dqfrlist_lock)) { + trace_xfs_dqput_wait(dqp); + xfs_dqunlock(dqp); + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); + xfs_dqlock(dqp); + } + + while (1) { + gdqp = NULL; + + /* We can't depend on nrefs being == 1 here */ + if (--dqp->q_nrefs == 0) { + trace_xfs_dqput_free(dqp); + + list_add_tail(&dqp->q_freelist, &xfs_Gqm->qm_dqfrlist); + xfs_Gqm->qm_dqfrlist_cnt++; + + /* + * If we just added a udquot to the freelist, then + * we want to release the gdquot reference that + * it (probably) has. Otherwise it'll keep the + * gdquot from getting reclaimed. + */ + if ((gdqp = dqp->q_gdquot)) { + /* + * Avoid a recursive dqput call + */ + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + } + xfs_dqunlock(dqp); + + /* + * If we had a group quota inside the user quota as a hint, + * release it now. + */ + if (! gdqp) + break; + dqp = gdqp; + } + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); +} + +/* + * Release a dquot. Flush it if dirty, then dqput() it. + * dquot must not be locked. + */ +void +xfs_qm_dqrele( + xfs_dquot_t *dqp) +{ + if (!dqp) + return; + + trace_xfs_dqrele(dqp); + + xfs_dqlock(dqp); + /* + * We don't care to flush it if the dquot is dirty here. + * That will create stutters that we want to avoid. + * Instead we do a delayed write when we try to reclaim + * a dirty dquot. Also xfs_sync will take part of the burden... + */ + xfs_qm_dqput(dqp); +} + +/* + * This is the dquot flushing I/O completion routine. It is called + * from interrupt level when the buffer containing the dquot is + * flushed to disk. It is responsible for removing the dquot logitem + * from the AIL if it has not been re-logged, and unlocking the dquot's + * flush lock. This behavior is very similar to that of inodes.. + */ +STATIC void +xfs_qm_dqflush_done( + struct xfs_buf *bp, + struct xfs_log_item *lip) +{ + xfs_dq_logitem_t *qip = (struct xfs_dq_logitem *)lip; + xfs_dquot_t *dqp = qip->qli_dquot; + struct xfs_ail *ailp = lip->li_ailp; + + /* + * We only want to pull the item from the AIL if its + * location in the log has not changed since we started the flush. + * Thus, we only bother if the dquot's lsn has + * not changed. 
+	 * not changed.
First we check the lsn outside the lock + * since it's cheaper, and then we recheck while + * holding the lock before removing the dquot from the AIL. + */ + if ((lip->li_flags & XFS_LI_IN_AIL) && + lip->li_lsn == qip->qli_flush_lsn) { + + /* xfs_trans_ail_delete() drops the AIL lock. */ + spin_lock(&ailp->xa_lock); + if (lip->li_lsn == qip->qli_flush_lsn) + xfs_trans_ail_delete(ailp, lip); + else + spin_unlock(&ailp->xa_lock); + } + + /* + * Release the dq's flush lock since we're done with it. + */ + xfs_dqfunlock(dqp); +} + +/* + * Write a modified dquot to disk. + * The dquot must be locked and the flush lock too taken by caller. + * The flush lock will not be unlocked until the dquot reaches the disk, + * but the dquot is free to be unlocked and modified by the caller + * in the interim. Dquot is still locked on return. This behavior is + * identical to that of inodes. + */ +int +xfs_qm_dqflush( + xfs_dquot_t *dqp, + uint flags) +{ + struct xfs_mount *mp = dqp->q_mount; + struct xfs_buf *bp; + struct xfs_disk_dquot *ddqp; + int error; + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(!completion_done(&dqp->q_flush)); + + trace_xfs_dqflush(dqp); + + /* + * If not dirty, or it's pinned and we are not supposed to block, nada. + */ + if (!XFS_DQ_IS_DIRTY(dqp) || + (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) { + xfs_dqfunlock(dqp); + return 0; + } + xfs_qm_dqunpin_wait(dqp); + + /* + * This may have been unpinned because the filesystem is shutting + * down forcibly. If that's the case we must not write this dquot + * to disk, because the log record didn't make it to disk! + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + dqp->dq_flags &= ~XFS_DQ_DIRTY; + xfs_dqfunlock(dqp); + return XFS_ERROR(EIO); + } + + /* + * Get the buffer containing the on-disk dquot + */ + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, + mp->m_quotainfo->qi_dqchunklen, 0, &bp); + if (error) { + ASSERT(error != ENOENT); + xfs_dqfunlock(dqp); + return error; + } + + /* + * Calculate the location of the dquot inside the buffer. + */ + ddqp = bp->b_addr + dqp->q_bufoffset; + + /* + * A simple sanity check in case we got a corrupted dquot.. + */ + error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, + XFS_QMOPT_DOWARN, "dqflush (incore copy)"); + if (error) { + xfs_buf_relse(bp); + xfs_dqfunlock(dqp); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + return XFS_ERROR(EIO); + } + + /* This is the only portion of data that needs to persist */ + memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t)); + + /* + * Clear the dirty field and remember the flush lsn for later use. + */ + dqp->dq_flags &= ~XFS_DQ_DIRTY; + + xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, + &dqp->q_logitem.qli_item.li_lsn); + + /* + * Attach an iodone routine so that we can remove this dquot from the + * AIL and release the flush lock once the dquot is synced to disk. + */ + xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, + &dqp->q_logitem.qli_item); + + /* + * If the buffer is pinned then push on the log so we won't + * get stuck waiting in the write for too long. + */ + if (xfs_buf_ispinned(bp)) { + trace_xfs_dqflush_force(dqp); + xfs_log_force(mp, 0); + } + + if (flags & SYNC_WAIT) + error = xfs_bwrite(mp, bp); + else + xfs_bdwrite(mp, bp); + + trace_xfs_dqflush_done(dqp); + + /* + * dqp is still locked, but caller is free to unlock it now. 
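+ *
+ * A minimal caller sketch under the rules above (illustrative only):
+ *
+ *	xfs_dqlock(dqp);
+ *	xfs_dqflock(dqp);
+ *	error = xfs_qm_dqflush(dqp, SYNC_WAIT);
+ *	xfs_dqunlock(dqp);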
+ */
+	return error;
+
+}
+
+int
+xfs_qm_dqlock_nowait(
+	xfs_dquot_t	*dqp)
+{
+	return mutex_trylock(&dqp->q_qlock);
+}
+
+void
+xfs_dqlock(
+	xfs_dquot_t	*dqp)
+{
+	mutex_lock(&dqp->q_qlock);
+}
+
+void
+xfs_dqunlock(
+	xfs_dquot_t	*dqp)
+{
+	mutex_unlock(&(dqp->q_qlock));
+	if (dqp->q_logitem.qli_dquot == dqp) {
+		/* Once was dqp->q_mount, but might just have been cleared */
+		xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp,
+					(xfs_log_item_t*)&(dqp->q_logitem));
+	}
+}
+
+
+void
+xfs_dqunlock_nonotify(
+	xfs_dquot_t	*dqp)
+{
+	mutex_unlock(&(dqp->q_qlock));
+}
+
+/*
+ * Lock two xfs_dquot structures.
+ *
+ * To avoid deadlocks we always lock the quota structure with
+ * the lower id first.
+ */
+void
+xfs_dqlock2(
+	xfs_dquot_t	*d1,
+	xfs_dquot_t	*d2)
+{
+	if (d1 && d2) {
+		ASSERT(d1 != d2);
+		if (be32_to_cpu(d1->q_core.d_id) >
+		    be32_to_cpu(d2->q_core.d_id)) {
+			mutex_lock(&d2->q_qlock);
+			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
+		} else {
+			mutex_lock(&d1->q_qlock);
+			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
+		}
+	} else if (d1) {
+		mutex_lock(&d1->q_qlock);
+	} else if (d2) {
+		mutex_lock(&d2->q_qlock);
+	}
+}
+
+
+/*
+ * Take a dquot out of the mount's dqlist as well as the hashlist.
+ * This is called via unmount as well as quotaoff, and the purge
+ * will always succeed unless there are soft (temp) references
+ * outstanding.
+ *
+ * This returns 0 if it was purged, 1 if it wasn't. It's not an error code
+ * that we're returning! XXXsup - not cool.
+ */
+/* ARGSUSED */
+int
+xfs_qm_dqpurge(
+	xfs_dquot_t	*dqp)
+{
+	xfs_dqhash_t	*qh = dqp->q_hash;
+	xfs_mount_t	*mp = dqp->q_mount;
+
+	ASSERT(mutex_is_locked(&mp->m_quotainfo->qi_dqlist_lock));
+	ASSERT(mutex_is_locked(&dqp->q_hash->qh_lock));
+
+	xfs_dqlock(dqp);
+	/*
+	 * We really can't afford to purge a dquot that is
+	 * referenced, because these are hard refs.
+	 * It shouldn't happen in general because we went thru _all_ inodes in
+	 * dqrele_all_inodes before calling this and didn't let the mountlock go.
+	 * However it is possible that we have dquots with temporary
+	 * references that are not attached to an inode. e.g. see xfs_setattr().
+	 */
+	if (dqp->q_nrefs != 0) {
+		xfs_dqunlock(dqp);
+		mutex_unlock(&dqp->q_hash->qh_lock);
+		return (1);
+	}
+
+	ASSERT(!list_empty(&dqp->q_freelist));
+
+	/*
+	 * If we're turning off quotas, we have to make sure that, for
+	 * example, we don't delete quota disk blocks while dquots are
+	 * in the process of getting written to those disk blocks.
+	 * This dquot might well be on AIL, and we can't leave it there
+	 * if we're turning off quotas. Basically, we need this flush
+	 * lock, and are willing to block on it.
+	 */
+	if (!xfs_dqflock_nowait(dqp)) {
+		/*
+		 * Block on the flush lock after nudging dquot buffer,
+		 * if it is incore.
+		 */
+		xfs_qm_dqflock_pushbuf_wait(dqp);
+	}
+
+	/*
+	 * XXXIf we're turning this type of quotas off, we don't care
+	 * about the dirty metadata sitting in this dquot. OTOH, if
+	 * we're unmounting, we do care, so we flush it and wait.
+	 */
+	if (XFS_DQ_IS_DIRTY(dqp)) {
+		int	error;
+
+		/* dqflush unlocks dqflock */
+		/*
+		 * Given that dqpurge is a very rare occurrence, it is OK
+		 * that we're holding the hashlist and mplist locks
+		 * across the disk write. But, ... XXXsup
+		 *
+		 * We don't care about getting disk errors here. We need
+		 * to purge this dquot anyway, so we go ahead regardless.
+ */ + error = xfs_qm_dqflush(dqp, SYNC_WAIT); + if (error) + xfs_warn(mp, "%s: dquot %p flush failed", + __func__, dqp); + xfs_dqflock(dqp); + } + ASSERT(atomic_read(&dqp->q_pincount) == 0); + ASSERT(XFS_FORCED_SHUTDOWN(mp) || + !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); + + list_del_init(&dqp->q_hashlist); + qh->qh_version++; + list_del_init(&dqp->q_mplist); + mp->m_quotainfo->qi_dqreclaims++; + mp->m_quotainfo->qi_dquots--; + /* + * XXX Move this to the front of the freelist, if we can get the + * freelist lock. + */ + ASSERT(!list_empty(&dqp->q_freelist)); + + dqp->q_mount = NULL; + dqp->q_hash = NULL; + dqp->dq_flags = XFS_DQ_INACTIVE; + memset(&dqp->q_core, 0, sizeof(dqp->q_core)); + xfs_dqfunlock(dqp); + xfs_dqunlock(dqp); + mutex_unlock(&qh->qh_lock); + return (0); +} + + +/* + * Give the buffer a little push if it is incore and + * wait on the flush lock. + */ +void +xfs_qm_dqflock_pushbuf_wait( + xfs_dquot_t *dqp) +{ + xfs_mount_t *mp = dqp->q_mount; + xfs_buf_t *bp; + + /* + * Check to see if the dquot has been flushed delayed + * write. If so, grab its buffer and send it + * out immediately. We'll be able to acquire + * the flush lock when the I/O completes. + */ + bp = xfs_incore(mp->m_ddev_targp, dqp->q_blkno, + mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); + if (!bp) + goto out_lock; + + if (XFS_BUF_ISDELAYWRITE(bp)) { + if (xfs_buf_ispinned(bp)) + xfs_log_force(mp, 0); + xfs_buf_delwri_promote(bp); + wake_up_process(bp->b_target->bt_task); + } + xfs_buf_relse(bp); +out_lock: + xfs_dqflock(dqp); +} diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h new file mode 100644 index 0000000..34b7e94 --- /dev/null +++ b/fs/xfs/xfs_dquot.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_DQUOT_H__ +#define __XFS_DQUOT_H__ + +/* + * Dquots are structures that hold quota information about a user or a group, + * much like inodes are for files. In fact, dquots share many characteristics + * with inodes. However, dquots can also be a centralized resource, relative + * to a collection of inodes. In this respect, dquots share some characteristics + * of the superblock. + * XFS dquots exploit both those in its algorithms. They make every attempt + * to not be a bottleneck when quotas are on and have minimal impact, if any, + * when quotas are off. 
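+ *
+ * For example (illustrative): every inode owned by one uid shares that
+ * uid's single user dquot, so one set of limits in q_core is enforced
+ * across any number of inodes without per-inode bookkeeping.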
+ */
+
+/*
+ * The hash chain headers (hash buckets)
+ */
+typedef struct xfs_dqhash {
+	struct list_head qh_list;
+	struct mutex	 qh_lock;
+	uint		 qh_version;	/* ever increasing version */
+	uint		 qh_nelems;	/* number of dquots on the list */
+} xfs_dqhash_t;
+
+struct xfs_mount;
+struct xfs_trans;
+
+/*
+ * The incore dquot structure
+ */
+typedef struct xfs_dquot {
+	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
+	struct list_head q_freelist;	/* global free list of dquots */
+	struct list_head q_mplist;	/* mount's list of dquots */
+	struct list_head q_hashlist;	/* global hash list of dquots */
+	xfs_dqhash_t	*q_hash;	/* the hashchain header */
+	struct xfs_mount *q_mount;	/* filesystem this relates to */
+	struct xfs_trans *q_transp;	/* trans this belongs to currently */
+	uint		 q_nrefs;	/* # active refs from inodes */
+	xfs_daddr_t	 q_blkno;	/* blkno of dquot buffer */
+	int		 q_bufoffset;	/* off of dq in buffer (# dquots) */
+	xfs_fileoff_t	 q_fileoffset;	/* offset in quotas file */
+
+	struct xfs_dquot *q_gdquot;	/* group dquot, hint only */
+	xfs_disk_dquot_t q_core;	/* actual usage & quotas */
+	xfs_dq_logitem_t q_logitem;	/* dquot log item */
+	xfs_qcnt_t	 q_res_bcount;	/* total regular nblks used+reserved */
+	xfs_qcnt_t	 q_res_icount;	/* total inos allocd+reserved */
+	xfs_qcnt_t	 q_res_rtbcount; /* total realtime blks used+reserved */
+	struct mutex	 q_qlock;	/* quota lock */
+	struct completion q_flush;	/* flush completion queue */
+	atomic_t	 q_pincount;	/* dquot pin count */
+	wait_queue_head_t q_pinwait;	/* dquot pinning wait queue */
+} xfs_dquot_t;
+
+/*
+ * Lock hierarchy for q_qlock:
+ *	XFS_QLOCK_NORMAL is the implicit default,
+ *	XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
+ */
+enum {
+	XFS_QLOCK_NORMAL = 0,
+	XFS_QLOCK_NESTED,
+};
+
+#define XFS_DQHOLD(dqp)		((dqp)->q_nrefs++)
+
+/*
+ * Manage the q_flush completion queue embedded in the dquot. This completion
+ * queue synchronizes processes attempting to flush the in-core dquot back to
+ * disk.
+ */
+static inline void xfs_dqflock(xfs_dquot_t *dqp)
+{
+	wait_for_completion(&dqp->q_flush);
+}
+
+static inline int xfs_dqflock_nowait(xfs_dquot_t *dqp)
+{
+	return try_wait_for_completion(&dqp->q_flush);
+}
+
+static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
+{
+	complete(&dqp->q_flush);
+}
+
+#define XFS_DQ_IS_LOCKED(dqp)	(mutex_is_locked(&((dqp)->q_qlock)))
+#define XFS_DQ_IS_DIRTY(dqp)	((dqp)->dq_flags & XFS_DQ_DIRTY)
+#define XFS_QM_ISUDQ(dqp)	((dqp)->dq_flags & XFS_DQ_USER)
+#define XFS_QM_ISPDQ(dqp)	((dqp)->dq_flags & XFS_DQ_PROJ)
+#define XFS_QM_ISGDQ(dqp)	((dqp)->dq_flags & XFS_DQ_GROUP)
+#define XFS_DQ_TO_QINF(dqp)	((dqp)->q_mount->m_quotainfo)
+#define XFS_DQ_TO_QIP(dqp)	(XFS_QM_ISUDQ(dqp) ? \
+				 XFS_DQ_TO_QINF(dqp)->qi_uquotaip : \
+				 XFS_DQ_TO_QINF(dqp)->qi_gquotaip)
+
+#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ?
\ + (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ + (XFS_IS_OQUOTA_ON((d)->q_mount)))) + +extern void xfs_qm_dqdestroy(xfs_dquot_t *); +extern int xfs_qm_dqflush(xfs_dquot_t *, uint); +extern int xfs_qm_dqpurge(xfs_dquot_t *); +extern void xfs_qm_dqunpin_wait(xfs_dquot_t *); +extern int xfs_qm_dqlock_nowait(xfs_dquot_t *); +extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp); +extern void xfs_qm_adjust_dqtimers(xfs_mount_t *, + xfs_disk_dquot_t *); +extern void xfs_qm_adjust_dqlimits(xfs_mount_t *, + xfs_disk_dquot_t *); +extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *, + xfs_dqid_t, uint, uint, xfs_dquot_t **); +extern void xfs_qm_dqput(xfs_dquot_t *); +extern void xfs_dqlock(xfs_dquot_t *); +extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *); +extern void xfs_dqunlock(xfs_dquot_t *); +extern void xfs_dqunlock_nonotify(xfs_dquot_t *); + +#endif /* __XFS_DQUOT_H__ */ diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c new file mode 100644 index 0000000..9e0e2fa --- /dev/null +++ b/fs/xfs/xfs_dquot_item.c @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_itable.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_priv.h" +#include "xfs_qm.h" + +static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip) +{ + return container_of(lip, struct xfs_dq_logitem, qli_item); +} + +/* + * returns the number of iovecs needed to log the given dquot item. + */ +STATIC uint +xfs_qm_dquot_logitem_size( + struct xfs_log_item *lip) +{ + /* + * we need only two iovecs, one for the format, one for the real thing + */ + return 2; +} + +/* + * fills in the vector of log iovecs for the given dquot log item. + */ +STATIC void +xfs_qm_dquot_logitem_format( + struct xfs_log_item *lip, + struct xfs_log_iovec *logvec) +{ + struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); + + logvec->i_addr = &qlip->qli_format; + logvec->i_len = sizeof(xfs_dq_logformat_t); + logvec->i_type = XLOG_REG_TYPE_QFORMAT; + logvec++; + logvec->i_addr = &qlip->qli_dquot->q_core; + logvec->i_len = sizeof(xfs_disk_dquot_t); + logvec->i_type = XLOG_REG_TYPE_DQUOT; + + ASSERT(2 == lip->li_desc->lid_size); + qlip->qli_format.qlf_size = 2; + +} + +/* + * Increment the pin count of the given dquot. 
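+ * Pairing sketch (illustrative only, not new behavior):
+ *
+ *	xfs_qm_dquot_logitem_pin(lip);		(at transaction commit)
+ *	... log I/O completes ...
+ *	xfs_qm_dquot_logitem_unpin(lip, 0);	(wakes xfs_qm_dqunpin_wait())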
+ */
+STATIC void
+xfs_qm_dquot_logitem_pin(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	atomic_inc(&dqp->q_pincount);
+}
+
+/*
+ * Decrement the pin count of the given dquot, and wake up
+ * anyone in xfs_qm_dqunpin_wait() if the count goes to 0. The
+ * dquot must have been previously pinned with a call to
+ * xfs_qm_dquot_logitem_pin().
+ */
+STATIC void
+xfs_qm_dquot_logitem_unpin(
+	struct xfs_log_item	*lip,
+	int			remove)
+{
+	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+
+	ASSERT(atomic_read(&dqp->q_pincount) > 0);
+	if (atomic_dec_and_test(&dqp->q_pincount))
+		wake_up(&dqp->q_pinwait);
+}
+
+/*
+ * Given the logitem, this writes the corresponding dquot entry to disk
+ * asynchronously. This is called with the dquot entry securely locked;
+ * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
+ * at the end.
+ */
+STATIC void
+xfs_qm_dquot_logitem_push(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+	int			error;
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	ASSERT(!completion_done(&dqp->q_flush));
+
+	/*
+	 * Since we were able to lock the dquot's flush lock and
+	 * we found it on the AIL, the dquot must be dirty. This
+	 * is because the dquot is removed from the AIL while still
+	 * holding the flush lock in xfs_qm_dqflush_done(). Thus, if
+	 * we found it in the AIL and were able to obtain the flush
+	 * lock without sleeping, then there must not have been
+	 * anyone in the process of flushing the dquot.
+	 */
+	error = xfs_qm_dqflush(dqp, 0);
+	if (error)
+		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
+			__func__, error, dqp);
+	xfs_dqunlock(dqp);
+}
+
+STATIC xfs_lsn_t
+xfs_qm_dquot_logitem_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	/*
+	 * We always re-log the entire dquot when it becomes dirty,
+	 * so the latest copy _is_ the only one that matters.
+	 */
+	return lsn;
+}
+
+/*
+ * This is called to wait for the given dquot to be unpinned.
+ * Most of these pin/unpin routines are plagiarized from inode code.
+ */
+void
+xfs_qm_dqunpin_wait(
+	struct xfs_dquot	*dqp)
+{
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	if (atomic_read(&dqp->q_pincount) == 0)
+		return;
+
+	/*
+	 * Give the log a push so we don't wait here too long.
+	 */
+	xfs_log_force(dqp->q_mount, 0);
+	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
+}
+
+/*
+ * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
+ * the dquot is locked by us, but the flush lock isn't. So, here we are
+ * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
+ * If so, we want to push it out to help us take this item off the AIL as soon
+ * as possible.
+ *
+ * We must not be holding the AIL lock at this point. Calling incore() to
+ * search the buffer cache can be a time consuming thing, and AIL lock is a
+ * spinlock.
+ */
+STATIC void
+xfs_qm_dquot_logitem_pushbuf(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
+	struct xfs_dquot	*dqp = qlip->qli_dquot;
+	struct xfs_buf		*bp;
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	/*
+	 * If the flush lock isn't locked anymore, chances are that the
+	 * dquot flush completed and the dquot was taken off the AIL.
+	 * So, just get out.
+	 */
+	if (completion_done(&dqp->q_flush) ||
+	    !(lip->li_flags & XFS_LI_IN_AIL)) {
+		xfs_dqunlock(dqp);
+		return;
+	}
+
+	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
+			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
+	xfs_dqunlock(dqp);
+	if (!bp)
+		return;
+	if (XFS_BUF_ISDELAYWRITE(bp))
+		xfs_buf_delwri_promote(bp);
+	xfs_buf_relse(bp);
+}
+
+/*
+ * This is called to attempt to lock the dquot associated with this
+ * dquot log item. Don't sleep on the dquot lock or the flush lock.
+ * If the flush lock is already held, indicating that the dquot has
+ * been or is in the process of being flushed, then see if we can
+ * find the dquot's buffer in the buffer cache without sleeping. If
+ * we can and it is marked delayed write, then we want to send it out.
+ * We delay doing so until the push routine, though, to avoid sleeping
+ * in any device strategy routines.
+ */
+STATIC uint
+xfs_qm_dquot_logitem_trylock(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+
+	if (atomic_read(&dqp->q_pincount) > 0)
+		return XFS_ITEM_PINNED;
+
+	if (!xfs_qm_dqlock_nowait(dqp))
+		return XFS_ITEM_LOCKED;
+
+	if (!xfs_dqflock_nowait(dqp)) {
+		/*
+		 * dquot has already been flushed to the backing buffer;
+		 * leave it locked, the pushbuf routine will unlock it.
+		 */
+		return XFS_ITEM_PUSHBUF;
+	}
+
+	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
+	return XFS_ITEM_SUCCESS;
+}
+
+/*
+ * Unlock the dquot associated with the log item.
+ * Clear the fields of the dquot and dquot log item that
+ * are specific to the current transaction. If the
+ * hold flag is set, do not unlock the dquot.
+ */
+STATIC void
+xfs_qm_dquot_logitem_unlock(
+	struct xfs_log_item	*lip)
+{
+	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
+
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	/*
+	 * Clear the transaction pointer in the dquot
+	 */
+	dqp->q_transp = NULL;
+
+	/*
+	 * dquots are never 'held' from getting unlocked at the end of
+	 * a transaction. Their locking and unlocking is hidden inside the
+	 * transaction layer, within trans_commit. Hence, no LI_HOLD flag
+	 * for the logitem.
+	 */
+	xfs_dqunlock(dqp);
+}
+
+/*
+ * this needs to stamp an lsn into the dquot, I think.
+ * rpc's that look at user dquot's would then have to
+ * push on the dependency recorded in the dquot
+ */
+STATIC void
+xfs_qm_dquot_logitem_committing(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+}
+
+/*
+ * This is the ops vector for dquots
+ */
+static struct xfs_item_ops xfs_dquot_item_ops = {
+	.iop_size	= xfs_qm_dquot_logitem_size,
+	.iop_format	= xfs_qm_dquot_logitem_format,
+	.iop_pin	= xfs_qm_dquot_logitem_pin,
+	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
+	.iop_trylock	= xfs_qm_dquot_logitem_trylock,
+	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
+	.iop_committed	= xfs_qm_dquot_logitem_committed,
+	.iop_push	= xfs_qm_dquot_logitem_push,
+	.iop_pushbuf	= xfs_qm_dquot_logitem_pushbuf,
+	.iop_committing = xfs_qm_dquot_logitem_committing
+};
+
+/*
+ * Initialize the dquot log item for a newly allocated dquot.
+ * The dquot isn't locked at this point, but it isn't on any of the lists
+ * either, so we don't care.
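+ *
+ * Illustrative call order (an assumption about the caller, not new
+ * code): xfs_qm_dqinit() is expected to invoke this right after the
+ * dquot is allocated, before xfs_qm_dqget() can publish it on any list.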
+ */
+void
+xfs_qm_dquot_logitem_init(
+	struct xfs_dquot	*dqp)
+{
+	struct xfs_dq_logitem	*lp = &dqp->q_logitem;
+
+	xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
+					&xfs_dquot_item_ops);
+	lp->qli_dquot = dqp;
+	lp->qli_format.qlf_type = XFS_LI_DQUOT;
+	lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
+	lp->qli_format.qlf_blkno = dqp->q_blkno;
+	lp->qli_format.qlf_len = 1;
+	/*
+	 * This is just the offset of this dquot within its buffer
+	 * (which is currently 1 FSB and probably won't change).
+	 * Hence 32 bits for this offset should be just fine.
+	 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
+	 * here, and recompute it at recovery time.
+	 */
+	lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
+}
+
+/*------------------ QUOTAOFF LOG ITEMS -------------------*/
+
+static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
+{
+	return container_of(lip, struct xfs_qoff_logitem, qql_item);
+}
+
+
+/*
+ * This returns the number of iovecs needed to log the given quotaoff item.
+ * We only need 1 iovec for a quotaoff item. It just logs the
+ * quotaoff_log_format structure.
+ */
+STATIC uint
+xfs_qm_qoff_logitem_size(
+	struct xfs_log_item	*lip)
+{
+	return 1;
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given quotaoff log item. We use only 1 iovec, and we point that
+ * at the quotaoff_log_format structure embedded in the quotaoff item.
+ * It is at this point that we assert that the embedded format is of
+ * the QUOTAOFF type.
+ */
+STATIC void
+xfs_qm_qoff_logitem_format(
+	struct xfs_log_item	*lip,
+	struct xfs_log_iovec	*log_vector)
+{
+	struct xfs_qoff_logitem	*qflip = QOFF_ITEM(lip);
+
+	ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
+
+	log_vector->i_addr = &qflip->qql_format;
+	log_vector->i_len = sizeof(xfs_qoff_logitem_t);
+	log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
+	qflip->qql_format.qf_size = 1;
+}
+
+/*
+ * Pinning has no meaning for a quotaoff item, so just return.
+ */
+STATIC void
+xfs_qm_qoff_logitem_pin(
+	struct xfs_log_item	*lip)
+{
+}
+
+/*
+ * Since pinning has no meaning for a quotaoff item, unpinning does
+ * not either.
+ */
+STATIC void
+xfs_qm_qoff_logitem_unpin(
+	struct xfs_log_item	*lip,
+	int			remove)
+{
+}
+
+/*
+ * Quotaoff items have no locking, so just return XFS_ITEM_LOCKED
+ * so that the caller doesn't bother with us.
+ */
+STATIC uint
+xfs_qm_qoff_logitem_trylock(
+	struct xfs_log_item	*lip)
+{
+	return XFS_ITEM_LOCKED;
+}
+
+/*
+ * Quotaoff items have no locking, so there is nothing to do here.
+ */
+STATIC void
+xfs_qm_qoff_logitem_unlock(
+	struct xfs_log_item	*lip)
+{
+}
+
+/*
+ * The quotaoff-start-item is logged only once and cannot be moved in the log,
+ * so simply return the lsn at which it's been logged.
+ */
+STATIC xfs_lsn_t
+xfs_qm_qoff_logitem_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	return lsn;
+}
+
+/*
+ * There isn't much you can do to push on a quotaoff item. It is simply
+ * stuck waiting for the log to be flushed to disk.
+ */
+STATIC void
+xfs_qm_qoff_logitem_push(
+	struct xfs_log_item	*lip)
+{
+}
+
+
+STATIC xfs_lsn_t
+xfs_qm_qoffend_logitem_committed(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn)
+{
+	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
+	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
+	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;
+
+	/*
+	 * Delete the qoff-start logitem from the AIL.
+	 * xfs_trans_ail_delete() drops the AIL lock.
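+	 *
+	 * Sketch of the two-item handshake this completes (illustrative only):
+	 *
+	 *	qfs = xfs_qm_qoff_logitem_init(mp, NULL, flags);  (start item)
+	 *	qfe = xfs_qm_qoff_logitem_init(mp, qfs, flags);   (end item)
+	 *
+	 * Once the end item commits, this routine frees both items.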
+	 */
+	spin_lock(&ailp->xa_lock);
+	xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);
+
+	kmem_free(qfs);
+	kmem_free(qfe);
+	return (xfs_lsn_t)-1;
+}
+
+/*
+ * XXX rcc - don't know quite what to do with this. I think we can
+ * just ignore it. The only time that isn't the case is if we allow
+ * the client to somehow see that quotas have been turned off, in which
+ * case we can't allow that to get back until the quotaoff hits the disk.
+ * So how would that happen? Also, do we need different routines for
+ * quotaoff start and quotaoff end? I suspect the answer is yes but
+ * to be sure, I need to look at the recovery code and see how quota off
+ * recovery is handled (do we roll forward or back or do something else).
+ * If we roll forwards or backwards, then we need two separate routines,
+ * one that does nothing and one that stamps in the lsn that matters
+ * (truly makes the quotaoff irrevocable). If we do something else,
+ * then maybe we don't need two.
+ */
+STATIC void
+xfs_qm_qoff_logitem_committing(
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		commit_lsn)
+{
+}
+
+static struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
+	.iop_size	= xfs_qm_qoff_logitem_size,
+	.iop_format	= xfs_qm_qoff_logitem_format,
+	.iop_pin	= xfs_qm_qoff_logitem_pin,
+	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
+	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
+	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
+	.iop_committed	= xfs_qm_qoffend_logitem_committed,
+	.iop_push	= xfs_qm_qoff_logitem_push,
+	.iop_committing = xfs_qm_qoff_logitem_committing
+};
+
+/*
+ * This is the ops vector shared by all quotaoff-start log items.
+ */
+static struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
+	.iop_size	= xfs_qm_qoff_logitem_size,
+	.iop_format	= xfs_qm_qoff_logitem_format,
+	.iop_pin	= xfs_qm_qoff_logitem_pin,
+	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
+	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
+	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
+	.iop_committed	= xfs_qm_qoff_logitem_committed,
+	.iop_push	= xfs_qm_qoff_logitem_push,
+	.iop_committing = xfs_qm_qoff_logitem_committing
+};
+
+/*
+ * Allocate and initialize a quotaoff item of the correct quota type(s).
+ */
+struct xfs_qoff_logitem *
+xfs_qm_qoff_logitem_init(
+	struct xfs_mount	*mp,
+	struct xfs_qoff_logitem	*start,
+	uint			flags)
+{
+	struct xfs_qoff_logitem	*qf;
+
+	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);
+
+	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
+			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
+	qf->qql_item.li_mountp = mp;
+	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
+	qf->qql_format.qf_flags = flags;
+	qf->qql_start_lip = start;
+	return qf;
+}
diff --git a/fs/xfs/xfs_dquot_item.h b/fs/xfs/xfs_dquot_item.h
new file mode 100644
index 0000000..5acae2a
--- /dev/null
+++ b/fs/xfs/xfs_dquot_item.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_DQUOT_ITEM_H__ +#define __XFS_DQUOT_ITEM_H__ + +struct xfs_dquot; +struct xfs_trans; +struct xfs_mount; +struct xfs_qoff_logitem; + +typedef struct xfs_dq_logitem { + xfs_log_item_t qli_item; /* common portion */ + struct xfs_dquot *qli_dquot; /* dquot ptr */ + xfs_lsn_t qli_flush_lsn; /* lsn at last flush */ + xfs_dq_logformat_t qli_format; /* logged structure */ +} xfs_dq_logitem_t; + +typedef struct xfs_qoff_logitem { + xfs_log_item_t qql_item; /* common portion */ + struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */ + xfs_qoff_logformat_t qql_format; /* logged structure */ +} xfs_qoff_logitem_t; + + +extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *); +extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *, + struct xfs_qoff_logitem *, uint); +extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *, + struct xfs_qoff_logitem *, uint); +extern void xfs_trans_log_quotaoff_item(struct xfs_trans *, + struct xfs_qoff_logitem *); + +#endif /* __XFS_DQUOT_ITEM_H__ */ diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c new file mode 100644 index 0000000..75e5d32 --- /dev/null +++ b/fs/xfs/xfs_export.c @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2004-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir2.h" +#include "xfs_mount.h" +#include "xfs_export.h" +#include "xfs_vnodeops.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_inode_item.h" +#include "xfs_trace.h" + +/* + * Note that we only accept fileids which are long enough rather than allow + * the parent generation number to default to zero. XFS considers zero a + * valid generation number not an invalid/wildcard value. + */ +static int xfs_fileid_length(int fileid_type) +{ + switch (fileid_type) { + case FILEID_INO32_GEN: + return 2; + case FILEID_INO32_GEN_PARENT: + return 4; + case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: + return 3; + case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: + return 6; + } + return 255; /* invalid */ +} + +STATIC int +xfs_fs_encode_fh( + struct dentry *dentry, + __u32 *fh, + int *max_len, + int connectable) +{ + struct fid *fid = (struct fid *)fh; + struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fh; + struct inode *inode = dentry->d_inode; + int fileid_type; + int len; + + /* Directories don't need their parent encoded, they have ".." 
 */
+	if (S_ISDIR(inode->i_mode) || !connectable)
+		fileid_type = FILEID_INO32_GEN;
+	else
+		fileid_type = FILEID_INO32_GEN_PARENT;
+
+	/*
+	 * If the filesystem may contain 64bit inode numbers, we need
+	 * to use larger file handles that can represent them.
+	 *
+	 * While we only allocate inodes that do not fit into 32 bits, any
+	 * large enough filesystem may contain them, thus the slightly
+	 * confusing looking conditional below.
+	 */
+	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
+	    (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
+		fileid_type |= XFS_FILEID_TYPE_64FLAG;
+
+	/*
+	 * Only encode if there is enough space given. In practice
+	 * this means we can't export a filesystem with 64bit inodes
+	 * over NFSv2 with the subtree_check export option; the other
+	 * seven combinations work. The real answer is "don't use v2".
+	 */
+	len = xfs_fileid_length(fileid_type);
+	if (*max_len < len) {
+		*max_len = len;
+		return 255;
+	}
+	*max_len = len;
+
+	switch (fileid_type) {
+	case FILEID_INO32_GEN_PARENT:
+		spin_lock(&dentry->d_lock);
+		fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
+		spin_unlock(&dentry->d_lock);
+		/*FALLTHRU*/
+	case FILEID_INO32_GEN:
+		fid->i32.ino = inode->i_ino;
+		fid->i32.gen = inode->i_generation;
+		break;
+	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
+		spin_lock(&dentry->d_lock);
+		fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
+		spin_unlock(&dentry->d_lock);
+		/*FALLTHRU*/
+	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
+		fid64->ino = inode->i_ino;
+		fid64->gen = inode->i_generation;
+		break;
+	}
+
+	return fileid_type;
+}
+
+STATIC struct inode *
+xfs_nfs_get_inode(
+	struct super_block	*sb,
+	u64			ino,
+	u32			generation)
+{
+	xfs_mount_t		*mp = XFS_M(sb);
+	xfs_inode_t		*ip;
+	int			error;
+
+	/*
+	 * NFS can sometimes send requests for ino 0. Fail them gracefully.
+	 */
+	if (ino == 0)
+		return ERR_PTR(-ESTALE);
+
+	/*
+	 * The XFS_IGET_UNTRUSTED flag means that an invalid inode number is
+	 * just fine and not an indication of a corrupted filesystem, as
+	 * clients can send invalid file handles and we have to handle it
+	 * gracefully.
+	 */
+	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
+	if (error) {
+		/*
+		 * EINVAL means the inode cluster doesn't exist anymore.
+		 * This implies the filehandle is stale, so we should
+		 * translate it here.
+		 * We don't use ESTALE directly down the chain to not
+		 * confuse applications using bulkstat that expect EINVAL.
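+		 *
+		 * Sketch of the resulting mapping (illustrative): a handle
+		 * whose inode cluster is gone comes back from xfs_iget() as
+		 * EINVAL, is rewritten to ESTALE below, and reaches the
+		 * caller as ERR_PTR(-ESTALE).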
+ */ + if (error == EINVAL || error == ENOENT) + error = ESTALE; + return ERR_PTR(-error); + } + + if (ip->i_d.di_gen != generation) { + IRELE(ip); + return ERR_PTR(-ESTALE); + } + + return VFS_I(ip); +} + +STATIC struct dentry * +xfs_fs_fh_to_dentry(struct super_block *sb, struct fid *fid, + int fh_len, int fileid_type) +{ + struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid; + struct inode *inode = NULL; + + if (fh_len < xfs_fileid_length(fileid_type)) + return NULL; + + switch (fileid_type) { + case FILEID_INO32_GEN_PARENT: + case FILEID_INO32_GEN: + inode = xfs_nfs_get_inode(sb, fid->i32.ino, fid->i32.gen); + break; + case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: + case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: + inode = xfs_nfs_get_inode(sb, fid64->ino, fid64->gen); + break; + } + + return d_obtain_alias(inode); +} + +STATIC struct dentry * +xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid, + int fh_len, int fileid_type) +{ + struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid; + struct inode *inode = NULL; + + switch (fileid_type) { + case FILEID_INO32_GEN_PARENT: + inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino, + fid->i32.parent_gen); + break; + case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: + inode = xfs_nfs_get_inode(sb, fid64->parent_ino, + fid64->parent_gen); + break; + } + + return d_obtain_alias(inode); +} + +STATIC struct dentry * +xfs_fs_get_parent( + struct dentry *child) +{ + int error; + struct xfs_inode *cip; + + error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); + if (unlikely(error)) + return ERR_PTR(-error); + + return d_obtain_alias(VFS_I(cip)); +} + +STATIC int +xfs_fs_nfs_commit_metadata( + struct inode *inode) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + int error = 0; + + xfs_ilock(ip, XFS_ILOCK_SHARED); + if (xfs_ipincount(ip)) { + error = _xfs_log_force_lsn(mp, ip->i_itemp->ili_last_lsn, + XFS_LOG_SYNC, NULL); + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + return error; +} + +const struct export_operations xfs_export_operations = { + .encode_fh = xfs_fs_encode_fh, + .fh_to_dentry = xfs_fs_fh_to_dentry, + .fh_to_parent = xfs_fs_fh_to_parent, + .get_parent = xfs_fs_get_parent, + .commit_metadata = xfs_fs_nfs_commit_metadata, +}; diff --git a/fs/xfs/xfs_export.h b/fs/xfs/xfs_export.h new file mode 100644 index 0000000..3272b6a --- /dev/null +++ b/fs/xfs/xfs_export.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_EXPORT_H__ +#define __XFS_EXPORT_H__ + +/* + * Common defines for code related to exporting XFS filesystems over NFS. + * + * The NFS fileid goes out on the wire as an array of + * 32bit unsigned ints in host order. There are 5 possible + * formats. 
+ * + * (1) fileid_type=0x00 + * (no fileid data; handled by the generic code) + * + * (2) fileid_type=0x01 + * inode-num + * generation + * + * (3) fileid_type=0x02 + * inode-num + * generation + * parent-inode-num + * parent-generation + * + * (4) fileid_type=0x81 + * inode-num-lo32 + * inode-num-hi32 + * generation + * + * (5) fileid_type=0x82 + * inode-num-lo32 + * inode-num-hi32 + * generation + * parent-inode-num-lo32 + * parent-inode-num-hi32 + * parent-generation + * + * Note, the NFS filehandle also includes an fsid portion which + * may have an inode number in it. That number is hardcoded to + * 32bits and there is no way for XFS to intercept it. In + * practice this means when exporting an XFS filesystem with 64bit + * inodes you should either export the mountpoint (rather than + * a subdirectory) or use the "fsid" export option. + */ + +struct xfs_fid64 { + u64 ino; + u32 gen; + u64 parent_ino; + u32 parent_gen; +} __attribute__((packed)); + +/* This flag goes on the wire. Don't play with it. */ +#define XFS_FILEID_TYPE_64FLAG 0x80 /* NFS fileid has 64bit inodes */ + +#endif /* __XFS_EXPORT_H__ */ diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c new file mode 100644 index 0000000..7f7b424 --- /dev/null +++ b/fs/xfs/xfs_file.c @@ -0,0 +1,1096 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_trans.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_inode_item.h" +#include "xfs_bmap.h" +#include "xfs_error.h" +#include "xfs_vnodeops.h" +#include "xfs_da_btree.h" +#include "xfs_ioctl.h" +#include "xfs_trace.h" + +#include +#include + +static const struct vm_operations_struct xfs_file_vm_ops; + +/* + * Locking primitives for read and write IO paths to ensure we consistently use + * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. + */ +static inline void +xfs_rw_ilock( + struct xfs_inode *ip, + int type) +{ + if (type & XFS_IOLOCK_EXCL) + mutex_lock(&VFS_I(ip)->i_mutex); + xfs_ilock(ip, type); +} + +static inline void +xfs_rw_iunlock( + struct xfs_inode *ip, + int type) +{ + xfs_iunlock(ip, type); + if (type & XFS_IOLOCK_EXCL) + mutex_unlock(&VFS_I(ip)->i_mutex); +} + +static inline void +xfs_rw_ilock_demote( + struct xfs_inode *ip, + int type) +{ + xfs_ilock_demote(ip, type); + if (type & XFS_IOLOCK_EXCL) + mutex_unlock(&VFS_I(ip)->i_mutex); +} + +/* + * xfs_iozero + * + * xfs_iozero clears the specified range of buffer supplied, + * and marks all the affected blocks as valid and modified. If + * an affected block is not allocated, it will be allocated. 
If
+ * an affected block is not completely overwritten, and is not
+ * valid before the operation, it will be read from disk before
+ * being partially zeroed.
+ */
+STATIC int
+xfs_iozero(
+	struct xfs_inode	*ip,	/* inode */
+	loff_t			pos,	/* offset in file */
+	size_t			count)	/* size of data to zero */
+{
+	struct page		*page;
+	struct address_space	*mapping;
+	int			status;
+
+	mapping = VFS_I(ip)->i_mapping;
+	do {
+		unsigned offset, bytes;
+		void *fsdata;
+
+		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
+		bytes = PAGE_CACHE_SIZE - offset;
+		if (bytes > count)
+			bytes = count;
+
+		status = pagecache_write_begin(NULL, mapping, pos, bytes,
+					AOP_FLAG_UNINTERRUPTIBLE,
+					&page, &fsdata);
+		if (status)
+			break;
+
+		zero_user(page, offset, bytes);
+
+		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
+					page, fsdata);
+		WARN_ON(status <= 0); /* can't return less than zero! */
+		pos += bytes;
+		count -= bytes;
+		status = 0;
+	} while (count);
+
+	return (-status);
+}
+
+STATIC int
+xfs_file_fsync(
+	struct file		*file,
+	loff_t			start,
+	loff_t			end,
+	int			datasync)
+{
+	struct inode		*inode = file->f_mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error = 0;
+	int			log_flushed = 0;
+
+	trace_xfs_file_fsync(ip);
+
+	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
+	if (error)
+		return error;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	xfs_iflags_clear(ip, XFS_ITRUNCATED);
+
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ioend_wait(ip);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	if (mp->m_flags & XFS_MOUNT_BARRIER) {
+		/*
+		 * If we have an RT and/or log subvolume we need to make sure
+		 * to flush the write cache of the device used for file data
+		 * first. This is to ensure newly written file data make
+		 * it to disk before logging the new inode size in case of
+		 * an extending write.
+		 */
+		if (XFS_IS_REALTIME_INODE(ip))
+			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
+		else if (mp->m_logdev_targp != mp->m_ddev_targp)
+			xfs_blkdev_issue_flush(mp->m_ddev_targp);
+	}
+
+	/*
+	 * We always need to make sure that the required inode state is safe on
+	 * disk. The inode might be clean but we still might need to force the
+	 * log because of committed transactions that haven't hit the disk yet.
+	 * Likewise, there could be unflushed non-transactional changes to the
+	 * inode core that have to go to disk and this requires us to issue
+	 * a synchronous transaction to capture these changes correctly.
+	 *
+	 * This code relies on the assumption that if the i_update_core field
+	 * of the inode is clear and the inode is unpinned then it is clean
+	 * and no action is required.
+	 */
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+
+	/*
+	 * First check if the VFS inode is marked dirty. All the dirtying
+	 * of non-transactional updates now goes through mark_inode_dirty*,
+	 * which allows us to distinguish between pure timestamp updates
+	 * and i_size updates which need to be caught for fdatasync.
+	 * After that also check for the dirty state in the XFS inode, which
+	 * might get cleared when the inode gets written out via the AIL
+	 * or xfs_iflush_cluster.
+	 */
+	if (((inode->i_state & I_DIRTY_DATASYNC) ||
+	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
+	    ip->i_update_core) {
+		/*
+		 * Kick off a transaction to log the inode core to get the
+		 * updates. The sync transaction will also force the log.
+		 */
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+		error = xfs_trans_reserve(tp, 0,
+				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+		if (error) {
+			xfs_trans_cancel(tp, 0);
+			return -error;
+		}
+		xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+		/*
+		 * Note - it's possible that we might have pushed ourselves out
+		 * of the way during trans_reserve which would flush the inode.
+		 * But there's no guarantee that the inode buffer has actually
+		 * gone out yet (it's delwri). Plus the buffer could be pinned
+		 * anyway if it's part of an inode in another recent
+		 * transaction. So we play it safe and fire off the
+		 * transaction anyway.
+		 */
+		xfs_trans_ijoin(tp, ip);
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+		xfs_trans_set_sync(tp);
+		error = _xfs_trans_commit(tp, 0, &log_flushed);
+
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	} else {
+		/*
+		 * Timestamps/size haven't changed since last inode flush or
+		 * inode transaction commit. That means either nothing got
+		 * written or a transaction committed which caught the updates.
+		 * If the latter happened and the transaction hasn't hit the
+		 * disk yet, the inode will still be pinned. If it is,
+		 * force the log.
+		 */
+		if (xfs_ipincount(ip)) {
+			error = _xfs_log_force_lsn(mp,
+					ip->i_itemp->ili_last_lsn,
+					XFS_LOG_SYNC, &log_flushed);
+		}
+		xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	}
+
+	/*
+	 * If we only have a single device, and the log force above was
+	 * a no-op we might have to flush the data device cache here.
+	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
+	 * an already allocated file and thus do not have any metadata to
+	 * commit.
+	 */
+	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
+	    mp->m_logdev_targp == mp->m_ddev_targp &&
+	    !XFS_IS_REALTIME_INODE(ip) &&
+	    !log_flushed)
+		xfs_blkdev_issue_flush(mp->m_ddev_targp);
+
+	return -error;
+}
+
+STATIC ssize_t
+xfs_file_aio_read(
+	struct kiocb		*iocb,
+	const struct iovec	*iovp,
+	unsigned long		nr_segs,
+	loff_t			pos)
+{
+	struct file		*file = iocb->ki_filp;
+	struct inode		*inode = file->f_mapping->host;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	size_t			size = 0;
+	ssize_t			ret = 0;
+	int			ioflags = 0;
+	xfs_fsize_t		n;
+	unsigned long		seg;
+
+	XFS_STATS_INC(xs_read_calls);
+
+	BUG_ON(iocb->ki_pos != pos);
+
+	if (unlikely(file->f_flags & O_DIRECT))
+		ioflags |= IO_ISDIRECT;
+	if (file->f_mode & FMODE_NOCMTIME)
+		ioflags |= IO_INVIS;
+
+	/* START copy & waste from filemap.c */
+	for (seg = 0; seg < nr_segs; seg++) {
+		const struct iovec *iv = &iovp[seg];
+
+		/*
+		 * If any segment has a negative length, or the cumulative
+		 * length ever wraps negative then return -EINVAL.
+		 */
+		size += iv->iov_len;
+		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
+			return XFS_ERROR(-EINVAL);
+	}
+	/* END copy & waste from filemap.c */
+
+	if (unlikely(ioflags & IO_ISDIRECT)) {
+		xfs_buftarg_t	*target =
+			XFS_IS_REALTIME_INODE(ip) ?
+				mp->m_rtdev_targp : mp->m_ddev_targp;
+		if ((iocb->ki_pos & target->bt_smask) ||
+		    (size & target->bt_smask)) {
+			if (iocb->ki_pos == ip->i_size)
+				return 0;
+			return -XFS_ERROR(EINVAL);
+		}
+	}
+
+	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
+	if (n <= 0 || size == 0)
+		return 0;
+
+	if (n < size)
+		size = n;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
+	if (unlikely(ioflags & IO_ISDIRECT)) {
+		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
+
+		if (inode->i_mapping->nrpages) {
+			ret = -xfs_flushinval_pages(ip,
+					(iocb->ki_pos & PAGE_CACHE_MASK),
+					-1, FI_REMAPF_LOCKED);
+			if (ret) {
+				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
+				return ret;
+			}
+		}
+		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+	} else
+		xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+
+	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
+
+	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
+	if (ret > 0)
+		XFS_STATS_ADD(xs_read_bytes, ret);
+
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_splice_read(
+	struct file		*infilp,
+	loff_t			*ppos,
+	struct pipe_inode_info	*pipe,
+	size_t			count,
+	unsigned int		flags)
+{
+	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
+	int			ioflags = 0;
+	ssize_t			ret;
+
+	XFS_STATS_INC(xs_read_calls);
+
+	if (infilp->f_mode & FMODE_NOCMTIME)
+		ioflags |= IO_INVIS;
+
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return -EIO;
+
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+
+	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
+
+	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
+	if (ret > 0)
+		XFS_STATS_ADD(xs_read_bytes, ret);
+
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	return ret;
+}
+
+STATIC void
+xfs_aio_write_isize_update(
+	struct inode	*inode,
+	loff_t		*ppos,
+	ssize_t		bytes_written)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	xfs_fsize_t		isize = i_size_read(inode);
+
+	if (bytes_written > 0)
+		XFS_STATS_ADD(xs_write_bytes, bytes_written);
+
+	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
+					*ppos > isize))
+		*ppos = isize;
+
+	if (*ppos > ip->i_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		if (*ppos > ip->i_size)
+			ip->i_size = *ppos;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
+ * part of the I/O may have been written to disk before the error occurred. In
+ * this case the on-disk file size may have been adjusted beyond the in-memory
+ * file size and now needs to be truncated back.
+ */
+STATIC void
+xfs_aio_write_newsize_update(
+	struct xfs_inode	*ip)
+{
+	if (ip->i_new_size) {
+		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
+		ip->i_new_size = 0;
+		if (ip->i_d.di_size > ip->i_size)
+			ip->i_d.di_size = ip->i_size;
+		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
+	}
+}
+
+/*
+ * xfs_file_splice_write() does not use xfs_rw_ilock() because
+ * generic_file_splice_write() takes the i_mutex itself. This, in theory,
+ * could cause lock inversions between the aio_write path and the splice path
+ * if someone is doing concurrent splice(2) based writes and write(2) based
+ * writes to the same inode. The only real way to fix this is to re-implement
+ * the generic code here with correct locking orders.
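+ *
+ * A sketch of the inversion being described (illustrative interleaving):
+ *
+ *	write(2) path:	xfs_rw_ilock()	takes i_mutex, then the iolock
+ *	splice path:	xfs_ilock()	takes the iolock, then i_mutex is
+ *			taken inside generic_file_splice_write()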
+ */ +STATIC ssize_t +xfs_file_splice_write( + struct pipe_inode_info *pipe, + struct file *outfilp, + loff_t *ppos, + size_t count, + unsigned int flags) +{ + struct inode *inode = outfilp->f_mapping->host; + struct xfs_inode *ip = XFS_I(inode); + xfs_fsize_t new_size; + int ioflags = 0; + ssize_t ret; + + XFS_STATS_INC(xs_write_calls); + + if (outfilp->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return -EIO; + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + new_size = *ppos + count; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + if (new_size > ip->i_size) + ip->i_new_size = new_size; + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + trace_xfs_file_splice_write(ip, count, *ppos, ioflags); + + ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); + + xfs_aio_write_isize_update(inode, ppos, ret); + xfs_aio_write_newsize_update(ip); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return ret; +} + +/* + * This routine is called to handle zeroing any space in the last + * block of the file that is beyond the EOF. We do this since the + * size is being increased without writing anything to that block + * and we don't want anyone to read the garbage on the disk. + */ +STATIC int /* error (positive) */ +xfs_zero_last_block( + xfs_inode_t *ip, + xfs_fsize_t offset, + xfs_fsize_t isize) +{ + xfs_fileoff_t last_fsb; + xfs_mount_t *mp = ip->i_mount; + int nimaps; + int zero_offset; + int zero_len; + int error = 0; + xfs_bmbt_irec_t imap; + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + + zero_offset = XFS_B_FSB_OFFSET(mp, isize); + if (zero_offset == 0) { + /* + * There are no extra bytes in the last block on disk to + * zero, so return. + */ + return 0; + } + + last_fsb = XFS_B_TO_FSBT(mp, isize); + nimaps = 1; + error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, + &nimaps, NULL); + if (error) { + return error; + } + ASSERT(nimaps > 0); + /* + * If the block underlying isize is just a hole, then there + * is nothing to zero. + */ + if (imap.br_startblock == HOLESTARTBLOCK) { + return 0; + } + /* + * Zero the part of the last block beyond the EOF, and write it + * out sync. We need to drop the ilock while we do this so we + * don't deadlock when the buffer cache calls back to us. + */ + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + zero_len = mp->m_sb.sb_blocksize - zero_offset; + if (isize + zero_len > offset) + zero_len = offset - isize; + error = xfs_iozero(ip, isize, zero_len); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + ASSERT(error >= 0); + return error; +} + +/* + * Zero any on disk space between the current EOF and the new, + * larger EOF. This handles the normal case of zeroing the remainder + * of the last block in the file and the unusual case of zeroing blocks + * out beyond the size of the file. This second case only happens + * with fixed size extents and when the system crashes before the inode + * size was updated but after blocks were allocated. If fill is set, + * then any holes in the range are filled and zeroed. If not, the holes + * are left alone as holes. 
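The tail-zeroing arithmetic in xfs_zero_last_block() above reduces to: find the byte offset of the old EOF within its block, then zero from there to the end of that block, clamped to the new write offset. A stand-alone sketch of just that calculation (block size and names are illustrative):

	#include <stdio.h>

	/*
	 * Bytes to zero in the block containing the old EOF before the file
	 * grows to new_offset; 0 when the old EOF sits on a block boundary.
	 */
	static long long tail_zero_len(long long isize, long long new_offset,
				       long long blocksize)
	{
		long long zero_off = isize % blocksize;	/* XFS_B_FSB_OFFSET() analogue */
		long long zero_len;

		if (zero_off == 0)
			return 0;
		zero_len = blocksize - zero_off;
		if (isize + zero_len > new_offset)	/* never zero past the new EOF */
			zero_len = new_offset - isize;
		return zero_len;
	}

	int main(void)
	{
		/* old EOF 4500, growing to 10000, 4096-byte blocks: zero 3692 bytes */
		printf("%lld\n", tail_zero_len(4500, 10000, 4096));
		return 0;
	}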
+ */ + +int /* error (positive) */ +xfs_zero_eof( + xfs_inode_t *ip, + xfs_off_t offset, /* starting I/O offset */ + xfs_fsize_t isize) /* current inode size */ +{ + xfs_mount_t *mp = ip->i_mount; + xfs_fileoff_t start_zero_fsb; + xfs_fileoff_t end_zero_fsb; + xfs_fileoff_t zero_count_fsb; + xfs_fileoff_t last_fsb; + xfs_fileoff_t zero_off; + xfs_fsize_t zero_len; + int nimaps; + int error = 0; + xfs_bmbt_irec_t imap; + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); + ASSERT(offset > isize); + + /* + * First handle zeroing the block on which isize resides. + * We only zero a part of that block so it is handled specially. + */ + error = xfs_zero_last_block(ip, offset, isize); + if (error) { + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); + return error; + } + + /* + * Calculate the range between the new size and the old + * where blocks needing to be zeroed may exist. To get the + * block where the last byte in the file currently resides, + * we need to subtract one from the size and truncate back + * to a block boundary. We subtract 1 in case the size is + * exactly on a block boundary. + */ + last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; + start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); + end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); + ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); + if (last_fsb == end_zero_fsb) { + /* + * The size was only incremented on its last block. + * We took care of that above, so just return. + */ + return 0; + } + + ASSERT(start_zero_fsb <= end_zero_fsb); + while (start_zero_fsb <= end_zero_fsb) { + nimaps = 1; + zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; + error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, + 0, NULL, 0, &imap, &nimaps, NULL); + if (error) { + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); + return error; + } + ASSERT(nimaps > 0); + + if (imap.br_state == XFS_EXT_UNWRITTEN || + imap.br_startblock == HOLESTARTBLOCK) { + /* + * This loop handles initializing pages that were + * partially initialized by the code below this + * loop. It basically zeroes the part of the page + * that sits on a hole and sets the page as P_HOLE + * and calls remapf if it is a mapped file. + */ + start_zero_fsb = imap.br_startoff + imap.br_blockcount; + ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); + continue; + } + + /* + * There are blocks we need to zero. + * Drop the inode lock while we're doing the I/O. + * We'll still have the iolock to protect us. + */ + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); + zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); + + if ((zero_off + zero_len) > offset) + zero_len = offset - zero_off; + + error = xfs_iozero(ip, zero_off, zero_len); + if (error) { + goto out_lock; + } + + start_zero_fsb = imap.br_startoff + imap.br_blockcount; + ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); + + xfs_ilock(ip, XFS_ILOCK_EXCL); + } + + return 0; + +out_lock: + xfs_ilock(ip, XFS_ILOCK_EXCL); + ASSERT(error >= 0); + return error; +} + +/* + * Common pre-write limit and setup checks. + * + * Returns with iolock held according to @iolock. 
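The invariant that xfs_zero_eof() maintains is observable from user space: bytes between the old and the new EOF must read back as zeroes, never as stale block contents. A minimal demonstration that works on any POSIX filesystem (the path is illustrative):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[16] = { 0 };
		int i;
		int fd = open("/tmp/gap-demo", O_CREAT | O_TRUNC | O_RDWR, 0644);

		if (fd < 0)
			return 1;
		pwrite(fd, "x", 1, 0);		/* old EOF at offset 1 */
		pwrite(fd, "y", 1, 1000);	/* extend well beyond it */
		pread(fd, buf, sizeof(buf), 100);
		for (i = 0; i < 16; i++)	/* every byte must print as 0 */
			printf("%d", buf[i]);
		printf("\n");
		close(fd);
		return 0;
	}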
+ */ +STATIC ssize_t +xfs_file_aio_write_checks( + struct file *file, + loff_t *pos, + size_t *count, + int *iolock) +{ + struct inode *inode = file->f_mapping->host; + struct xfs_inode *ip = XFS_I(inode); + xfs_fsize_t new_size; + int error = 0; + + error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); + if (error) { + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); + *iolock = 0; + return error; + } + + new_size = *pos + *count; + if (new_size > ip->i_size) + ip->i_new_size = new_size; + + if (likely(!(file->f_mode & FMODE_NOCMTIME))) + file_update_time(file); + + /* + * If the offset is beyond the size of the file, we need to zero any + * blocks that fall between the existing EOF and the start of this + * write. + */ + if (*pos > ip->i_size) + error = -xfs_zero_eof(ip, *pos, ip->i_size); + + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); + if (error) + return error; + + /* + * If we're writing the file then make sure to clear the setuid and + * setgid bits if the process is not being run by root. This keeps + * people from modifying setuid and setgid binaries. + */ + return file_remove_suid(file); + +} + +/* + * xfs_file_dio_aio_write - handle direct IO writes + * + * Lock the inode appropriately to prepare for and issue a direct IO write. + * By separating it from the buffered write path we remove all the tricky to + * follow locking changes and looping. + * + * If there are cached pages or we're extending the file, we need IOLOCK_EXCL + * until we're sure the bytes at the new EOF have been zeroed and/or the cached + * pages are flushed out. + * + * In most cases the direct IO writes will be done holding IOLOCK_SHARED + * allowing them to be done in parallel with reads and other direct IO writes. + * However, if the IO is not aligned to filesystem blocks, the direct IO layer + * needs to do sub-block zeroing and that requires serialisation against other + * direct IOs to the same block. In this case we need to serialise the + * submission of the unaligned IOs so that we don't get racing block zeroing in + * the dio layer. To avoid the problem with aio, we also need to wait for + * outstanding IOs to complete so that unwritten extent conversion is completed + * before we try to map the overlapping block. This is currently implemented by + * hitting it with a big hammer (i.e. xfs_ioend_wait()). + * + * Returns with locks held indicated by @iolock and errors indicated by + * negative return values. + */ +STATIC ssize_t +xfs_file_dio_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos, + size_t ocount, + int *iolock) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + ssize_t ret = 0; + size_t count = ocount; + int unaligned_io = 0; + struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? 
+ mp->m_rtdev_targp : mp->m_ddev_targp; + + *iolock = 0; + if ((pos & target->bt_smask) || (count & target->bt_smask)) + return -XFS_ERROR(EINVAL); + + if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) + unaligned_io = 1; + + if (unaligned_io || mapping->nrpages || pos > ip->i_size) + *iolock = XFS_IOLOCK_EXCL; + else + *iolock = XFS_IOLOCK_SHARED; + xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); + + ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); + if (ret) + return ret; + + if (mapping->nrpages) { + WARN_ON(*iolock != XFS_IOLOCK_EXCL); + ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, + FI_REMAPF_LOCKED); + if (ret) + return ret; + } + + /* + * If we are doing unaligned IO, wait for all other IO to drain, + * otherwise demote the lock if we had to flush cached pages + */ + if (unaligned_io) + xfs_ioend_wait(ip); + else if (*iolock == XFS_IOLOCK_EXCL) { + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); + *iolock = XFS_IOLOCK_SHARED; + } + + trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); + ret = generic_file_direct_write(iocb, iovp, + &nr_segs, pos, &iocb->ki_pos, count, ocount); + + /* No fallback to buffered IO on errors for XFS. */ + ASSERT(ret < 0 || ret == count); + return ret; +} + +STATIC ssize_t +xfs_file_buffered_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos, + size_t ocount, + int *iolock) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + ssize_t ret; + int enospc = 0; + size_t count = ocount; + + *iolock = XFS_IOLOCK_EXCL; + xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); + + ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); + if (ret) + return ret; + + /* We can write back this queue in page reclaim */ + current->backing_dev_info = mapping->backing_dev_info; + +write_retry: + trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); + ret = generic_file_buffered_write(iocb, iovp, nr_segs, + pos, &iocb->ki_pos, count, ret); + /* + * if we just got an ENOSPC, flush the inode now we aren't holding any + * page locks and retry *once* + */ + if (ret == -ENOSPC && !enospc) { + ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); + if (ret) + return ret; + enospc = 1; + goto write_retry; + } + current->backing_dev_info = NULL; + return ret; +} + +STATIC ssize_t +xfs_file_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + ssize_t ret; + int iolock; + size_t ocount = 0; + + XFS_STATS_INC(xs_write_calls); + + BUG_ON(iocb->ki_pos != pos); + + ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); + if (ret) + return ret; + + if (ocount == 0) + return 0; + + xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return -EIO; + + if (unlikely(file->f_flags & O_DIRECT)) + ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); + else + ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); + + xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); + + if (ret <= 0) + goto out_unlock; + + /* Handle various SYNC-type writes */ + if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { + loff_t end = pos + ret - 1; + int error; + + xfs_rw_iunlock(ip, iolock); + error = 
xfs_file_fsync(file, pos, end,
+ (file->f_flags & __O_SYNC) ? 0 : 1);
+ xfs_rw_ilock(ip, iolock);
+ if (error)
+ ret = error;
+ }
+
+out_unlock:
+ xfs_aio_write_newsize_update(ip);
+ xfs_rw_iunlock(ip, iolock);
+ return ret;
+}
+
+STATIC long
+xfs_file_fallocate(
+ struct file *file,
+ int mode,
+ loff_t offset,
+ loff_t len)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ long error;
+ loff_t new_size = 0;
+ xfs_flock64_t bf;
+ xfs_inode_t *ip = XFS_I(inode);
+ int cmd = XFS_IOC_RESVSP;
+ int attr_flags = XFS_ATTR_NOLOCK;
+
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ return -EOPNOTSUPP;
+
+ bf.l_whence = 0;
+ bf.l_start = offset;
+ bf.l_len = len;
+
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+ if (mode & FALLOC_FL_PUNCH_HOLE)
+ cmd = XFS_IOC_UNRESVSP;
+
+ /* check the new inode size is valid before allocating */
+ if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+ offset + len > i_size_read(inode)) {
+ new_size = offset + len;
+ error = inode_newsize_ok(inode, new_size);
+ if (error)
+ goto out_unlock;
+ }
+
+ if (file->f_flags & O_DSYNC)
+ attr_flags |= XFS_ATTR_SYNC;
+
+ error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
+ if (error)
+ goto out_unlock;
+
+ /* Change file size if needed */
+ if (new_size) {
+ struct iattr iattr;
+
+ iattr.ia_valid = ATTR_SIZE;
+ iattr.ia_size = new_size;
+ error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
+ }
+
+out_unlock:
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return error;
+}
+
+
+STATIC int
+xfs_file_open(
+ struct inode *inode,
+ struct file *file)
+{
+ if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
+ return -EFBIG;
+ if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
+ return -EIO;
+ return 0;
+}
+
+STATIC int
+xfs_dir_open(
+ struct inode *inode,
+ struct file *file)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ int mode;
+ int error;
+
+ error = xfs_file_open(inode, file);
+ if (error)
+ return error;
+
+ /*
+ * If there are any blocks, read-ahead block 0 as we're almost
+ * certain to have the next operation be a read there.
+ */
+ mode = xfs_ilock_map_shared(ip);
+ if (ip->i_d.di_nextents > 0)
+ xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
+ xfs_iunlock(ip, mode);
+ return 0;
+}
+
+STATIC int
+xfs_file_release(
+ struct inode *inode,
+ struct file *filp)
+{
+ return -xfs_release(XFS_I(inode));
+}
+
+STATIC int
+xfs_file_readdir(
+ struct file *filp,
+ void *dirent,
+ filldir_t filldir)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ xfs_inode_t *ip = XFS_I(inode);
+ int error;
+ size_t bufsize;
+
+ /*
+ * The Linux API doesn't pass the total size of the buffer
+ * we read into down to the filesystem. With the filldir concept
+ * it's not needed for correct information, but the XFS dir2 leaf
+ * code wants an estimate of the buffer size to calculate its
+ * readahead window and size the buffers used for mapping to
+ * physical blocks.
+ *
+ * Try to give it an estimate that's good enough, maybe at some
+ * point we can change the ->readdir prototype to include the
+ * buffer size. For now we use the current glibc buffer size.
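The 32768 below matches the roughly 32 KiB chunk that glibc's readdir() has historically requested from getdents() per call; the comment's "current glibc buffer size" refers to exactly that. A plain directory walk showing the user-side API that hides this buffer (a sketch, not part of the patch):

	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		DIR *d = opendir(".");
		struct dirent *de;

		if (!d)
			return 1;
		/* each readdir() is served from a getdents() buffer that
		   glibc sizes internally, around 32 KiB in this era */
		while ((de = readdir(d)) != NULL)
			printf("%s\n", de->d_name);
		closedir(d);
		return 0;
	}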
+ */ + bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); + + error = xfs_readdir(ip, dirent, bufsize, + (xfs_off_t *)&filp->f_pos, filldir); + if (error) + return -error; + return 0; +} + +STATIC int +xfs_file_mmap( + struct file *filp, + struct vm_area_struct *vma) +{ + vma->vm_ops = &xfs_file_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; + + file_accessed(filp); + return 0; +} + +/* + * mmap()d file has taken write protection fault and is being made + * writable. We can set the page state up correctly for a writable + * page, which means we can do correct delalloc accounting (ENOSPC + * checking!) and unwritten extent mapping. + */ +STATIC int +xfs_vm_page_mkwrite( + struct vm_area_struct *vma, + struct vm_fault *vmf) +{ + return block_page_mkwrite(vma, vmf, xfs_get_blocks); +} + +const struct file_operations xfs_file_operations = { + .llseek = generic_file_llseek, + .read = do_sync_read, + .write = do_sync_write, + .aio_read = xfs_file_aio_read, + .aio_write = xfs_file_aio_write, + .splice_read = xfs_file_splice_read, + .splice_write = xfs_file_splice_write, + .unlocked_ioctl = xfs_file_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = xfs_file_compat_ioctl, +#endif + .mmap = xfs_file_mmap, + .open = xfs_file_open, + .release = xfs_file_release, + .fsync = xfs_file_fsync, + .fallocate = xfs_file_fallocate, +}; + +const struct file_operations xfs_dir_file_operations = { + .open = xfs_dir_open, + .read = generic_read_dir, + .readdir = xfs_file_readdir, + .llseek = generic_file_llseek, + .unlocked_ioctl = xfs_file_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = xfs_file_compat_ioctl, +#endif + .fsync = xfs_file_fsync, +}; + +static const struct vm_operations_struct xfs_file_vm_ops = { + .fault = filemap_fault, + .page_mkwrite = xfs_vm_page_mkwrite, +}; diff --git a/fs/xfs/xfs_fs_subr.c b/fs/xfs/xfs_fs_subr.c new file mode 100644 index 0000000..ed88ed1 --- /dev/null +++ b/fs/xfs/xfs_fs_subr.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000-2002,2005-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_vnodeops.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_trace.h" + +/* + * note: all filemap functions return negative error codes. These + * need to be inverted before returning to the xfs core functions. + */ +void +xfs_tosspages( + xfs_inode_t *ip, + xfs_off_t first, + xfs_off_t last, + int fiopt) +{ + /* can't toss partial tail pages, so mask them out */ + last &= ~(PAGE_SIZE - 1); + truncate_inode_pages_range(VFS_I(ip)->i_mapping, first, last - 1); +} + +int +xfs_flushinval_pages( + xfs_inode_t *ip, + xfs_off_t first, + xfs_off_t last, + int fiopt) +{ + struct address_space *mapping = VFS_I(ip)->i_mapping; + int ret = 0; + + trace_xfs_pagecache_inval(ip, first, last); + + xfs_iflags_clear(ip, XFS_ITRUNCATED); + ret = filemap_write_and_wait_range(mapping, first, + last == -1 ? 
LLONG_MAX : last); + if (!ret) + truncate_inode_pages_range(mapping, first, last); + return -ret; +} + +int +xfs_flush_pages( + xfs_inode_t *ip, + xfs_off_t first, + xfs_off_t last, + uint64_t flags, + int fiopt) +{ + struct address_space *mapping = VFS_I(ip)->i_mapping; + int ret = 0; + int ret2; + + xfs_iflags_clear(ip, XFS_ITRUNCATED); + ret = -filemap_fdatawrite_range(mapping, first, + last == -1 ? LLONG_MAX : last); + if (flags & XBF_ASYNC) + return ret; + ret2 = xfs_wait_on_pages(ip, first, last); + if (!ret) + ret = ret2; + return ret; +} + +int +xfs_wait_on_pages( + xfs_inode_t *ip, + xfs_off_t first, + xfs_off_t last) +{ + struct address_space *mapping = VFS_I(ip)->i_mapping; + + if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) { + return -filemap_fdatawait_range(mapping, first, + last == -1 ? ip->i_size - 1 : last); + } + return 0; +} diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c new file mode 100644 index 0000000..76e81cf --- /dev/null +++ b/fs/xfs/xfs_globals.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_sysctl.h" + +/* + * Tunable XFS parameters. xfs_params is required even when CONFIG_SYSCTL=n, + * other XFS code uses these values. Times are measured in centisecs (i.e. + * 100ths of a second). + */ +xfs_param_t xfs_params = { + /* MIN DFLT MAX */ + .sgid_inherit = { 0, 0, 1 }, + .symlink_mode = { 0, 0, 1 }, + .panic_mask = { 0, 0, 255 }, + .error_level = { 0, 3, 11 }, + .syncd_timer = { 1*100, 30*100, 7200*100}, + .stats_clear = { 0, 0, 1 }, + .inherit_sync = { 0, 1, 1 }, + .inherit_nodump = { 0, 1, 1 }, + .inherit_noatim = { 0, 1, 1 }, + .xfs_buf_timer = { 100/2, 1*100, 30*100 }, + .xfs_buf_age = { 1*100, 15*100, 7200*100}, + .inherit_nosym = { 0, 0, 1 }, + .rotorstep = { 1, 1, 255 }, + .inherit_nodfrg = { 0, 1, 1 }, + .fstrm_timer = { 1, 30*100, 3600*100}, +}; diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c new file mode 100644 index 0000000..f7ce7de --- /dev/null +++ b/fs/xfs/xfs_ioctl.c @@ -0,0 +1,1556 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_ioctl.h" +#include "xfs_rtalloc.h" +#include "xfs_itable.h" +#include "xfs_error.h" +#include "xfs_attr.h" +#include "xfs_bmap.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" +#include "xfs_dfrag.h" +#include "xfs_fsops.h" +#include "xfs_vnodeops.h" +#include "xfs_discard.h" +#include "xfs_quota.h" +#include "xfs_inode_item.h" +#include "xfs_export.h" +#include "xfs_trace.h" + +#include +#include +#include +#include +#include +#include +#include + +/* + * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to + * a file or fs handle. + * + * XFS_IOC_PATH_TO_FSHANDLE + * returns fs handle for a mount point or path within that mount point + * XFS_IOC_FD_TO_HANDLE + * returns full handle for a FD opened in user space + * XFS_IOC_PATH_TO_HANDLE + * returns full handle for a path + */ +int +xfs_find_handle( + unsigned int cmd, + xfs_fsop_handlereq_t *hreq) +{ + int hsize; + xfs_handle_t handle; + struct inode *inode; + struct file *file = NULL; + struct path path; + int error; + struct xfs_inode *ip; + + if (cmd == XFS_IOC_FD_TO_HANDLE) { + file = fget(hreq->fd); + if (!file) + return -EBADF; + inode = file->f_path.dentry->d_inode; + } else { + error = user_lpath((const char __user *)hreq->path, &path); + if (error) + return error; + inode = path.dentry->d_inode; + } + ip = XFS_I(inode); + + /* + * We can only generate handles for inodes residing on a XFS filesystem, + * and only for regular files, directories or symbolic links. + */ + error = -EINVAL; + if (inode->i_sb->s_magic != XFS_SB_MAGIC) + goto out_put; + + error = -EBADF; + if (!S_ISREG(inode->i_mode) && + !S_ISDIR(inode->i_mode) && + !S_ISLNK(inode->i_mode)) + goto out_put; + + + memcpy(&handle.ha_fsid, ip->i_mount->m_fixedfsid, sizeof(xfs_fsid_t)); + + if (cmd == XFS_IOC_PATH_TO_FSHANDLE) { + /* + * This handle only contains an fsid, zero the rest. + */ + memset(&handle.ha_fid, 0, sizeof(handle.ha_fid)); + hsize = sizeof(xfs_fsid_t); + } else { + int lock_mode; + + lock_mode = xfs_ilock_map_shared(ip); + handle.ha_fid.fid_len = sizeof(xfs_fid_t) - + sizeof(handle.ha_fid.fid_len); + handle.ha_fid.fid_pad = 0; + handle.ha_fid.fid_gen = ip->i_d.di_gen; + handle.ha_fid.fid_ino = ip->i_ino; + xfs_iunlock_map_shared(ip, lock_mode); + + hsize = XFS_HSIZE(handle); + } + + error = -EFAULT; + if (copy_to_user(hreq->ohandle, &handle, hsize) || + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) + goto out_put; + + error = 0; + + out_put: + if (cmd == XFS_IOC_FD_TO_HANDLE) + fput(file); + else + path_put(&path); + return error; +} + +/* + * No need to do permission checks on the various pathname components + * as the handle operations are privileged. + */ +STATIC int +xfs_handle_acceptable( + void *context, + struct dentry *dentry) +{ + return 1; +} + +/* + * Convert userspace handle data into a dentry. 
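Assuming the libhandle wrappers shipped with xfsprogs (path_to_handle(), open_by_handle() and free_handle(); treat the exact signatures as an assumption and check open_by_handle(3) locally), the handle ioctls above are typically driven from user space like this sketch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <xfs/handle.h>		/* from xfsprogs; link with -lhandle */

	int main(int argc, char **argv)
	{
		void *hanp;
		size_t hlen;
		int fd;

		if (argc != 2)
			return 1;
		/* wraps XFS_IOC_PATH_TO_HANDLE */
		if (path_to_handle(argv[1], &hanp, &hlen) < 0) {
			perror("path_to_handle");
			return 1;
		}
		/* wraps XFS_IOC_OPEN_BY_HANDLE; needs CAP_SYS_ADMIN */
		fd = open_by_handle(hanp, hlen, O_RDONLY);
		if (fd < 0)
			perror("open_by_handle");
		free_handle(hanp, hlen);
		return fd < 0;
	}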
+ */ +struct dentry * +xfs_handle_to_dentry( + struct file *parfilp, + void __user *uhandle, + u32 hlen) +{ + xfs_handle_t handle; + struct xfs_fid64 fid; + + /* + * Only allow handle opens under a directory. + */ + if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode)) + return ERR_PTR(-ENOTDIR); + + if (hlen != sizeof(xfs_handle_t)) + return ERR_PTR(-EINVAL); + if (copy_from_user(&handle, uhandle, hlen)) + return ERR_PTR(-EFAULT); + if (handle.ha_fid.fid_len != + sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len)) + return ERR_PTR(-EINVAL); + + memset(&fid, 0, sizeof(struct fid)); + fid.ino = handle.ha_fid.fid_ino; + fid.gen = handle.ha_fid.fid_gen; + + return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3, + FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG, + xfs_handle_acceptable, NULL); +} + +STATIC struct dentry * +xfs_handlereq_to_dentry( + struct file *parfilp, + xfs_fsop_handlereq_t *hreq) +{ + return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen); +} + +int +xfs_open_by_handle( + struct file *parfilp, + xfs_fsop_handlereq_t *hreq) +{ + const struct cred *cred = current_cred(); + int error; + int fd; + int permflag; + struct file *filp; + struct inode *inode; + struct dentry *dentry; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + + dentry = xfs_handlereq_to_dentry(parfilp, hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + inode = dentry->d_inode; + + /* Restrict xfs_open_by_handle to directories & regular files. */ + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { + error = -XFS_ERROR(EPERM); + goto out_dput; + } + +#if BITS_PER_LONG != 32 + hreq->oflags |= O_LARGEFILE; +#endif + + /* Put open permission in namei format. */ + permflag = hreq->oflags; + if ((permflag+1) & O_ACCMODE) + permflag++; + if (permflag & O_TRUNC) + permflag |= 2; + + if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && + (permflag & FMODE_WRITE) && IS_APPEND(inode)) { + error = -XFS_ERROR(EPERM); + goto out_dput; + } + + if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) { + error = -XFS_ERROR(EACCES); + goto out_dput; + } + + /* Can't write directories. */ + if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) { + error = -XFS_ERROR(EISDIR); + goto out_dput; + } + + fd = get_unused_fd(); + if (fd < 0) { + error = fd; + goto out_dput; + } + + filp = dentry_open(dentry, mntget(parfilp->f_path.mnt), + hreq->oflags, cred); + if (IS_ERR(filp)) { + put_unused_fd(fd); + return PTR_ERR(filp); + } + + if (S_ISREG(inode->i_mode)) { + filp->f_flags |= O_NOATIME; + filp->f_mode |= FMODE_NOCMTIME; + } + + fd_install(fd, filp); + return fd; + + out_dput: + dput(dentry); + return error; +} + +/* + * This is a copy from fs/namei.c:vfs_readlink(), except for removing it's + * unused first argument. + */ +STATIC int +do_readlink( + char __user *buffer, + int buflen, + const char *link) +{ + int len; + + len = PTR_ERR(link); + if (IS_ERR(link)) + goto out; + + len = strlen(link); + if (len > (unsigned) buflen) + len = buflen; + if (copy_to_user(buffer, link, len)) + len = -EFAULT; + out: + return len; +} + + +int +xfs_readlink_by_handle( + struct file *parfilp, + xfs_fsop_handlereq_t *hreq) +{ + struct dentry *dentry; + __u32 olen; + void *link; + int error; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + + dentry = xfs_handlereq_to_dentry(parfilp, hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + /* Restrict this handle operation to symlinks only. 
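Like readlink(2), do_readlink() copies at most buflen bytes and never NUL-terminates, which is also why these handle ioctls return a positive byte count on success. The usual user-space counterpart terminates the buffer by hand (a sketch):

	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		char buf[4096];
		ssize_t len;

		if (argc != 2)
			return 1;
		/* readlink() does not NUL-terminate, so reserve one byte */
		len = readlink(argv[1], buf, sizeof(buf) - 1);
		if (len < 0)
			return 1;
		buf[len] = '\0';
		printf("%s -> %s\n", argv[1], buf);
		return 0;
	}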
*/ + if (!S_ISLNK(dentry->d_inode->i_mode)) { + error = -XFS_ERROR(EINVAL); + goto out_dput; + } + + if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { + error = -XFS_ERROR(EFAULT); + goto out_dput; + } + + link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); + if (!link) { + error = -XFS_ERROR(ENOMEM); + goto out_dput; + } + + error = -xfs_readlink(XFS_I(dentry->d_inode), link); + if (error) + goto out_kfree; + error = do_readlink(hreq->ohandle, olen, link); + if (error) + goto out_kfree; + + out_kfree: + kfree(link); + out_dput: + dput(dentry); + return error; +} + +STATIC int +xfs_fssetdm_by_handle( + struct file *parfilp, + void __user *arg) +{ + int error; + struct fsdmidata fsd; + xfs_fsop_setdm_handlereq_t dmhreq; + struct dentry *dentry; + + if (!capable(CAP_MKNOD)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) + return -XFS_ERROR(EFAULT); + + dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { + error = -XFS_ERROR(EPERM); + goto out; + } + + if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { + error = -XFS_ERROR(EFAULT); + goto out; + } + + error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, + fsd.fsd_dmstate); + + out: + dput(dentry); + return error; +} + +STATIC int +xfs_attrlist_by_handle( + struct file *parfilp, + void __user *arg) +{ + int error = -ENOMEM; + attrlist_cursor_kern_t *cursor; + xfs_fsop_attrlist_handlereq_t al_hreq; + struct dentry *dentry; + char *kbuf; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) + return -XFS_ERROR(EFAULT); + if (al_hreq.buflen > XATTR_LIST_MAX) + return -XFS_ERROR(EINVAL); + + /* + * Reject flags, only allow namespaces. 
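ATTR_ROOT and ATTR_SECURE select what Linux exposes as the trusted and security xattr namespaces; the unprivileged path to the same data is the regular VFS xattr API. A sketch using listxattr(2), with error handling kept minimal:

	#include <stdio.h>
	#include <string.h>
	#include <sys/xattr.h>

	int main(int argc, char **argv)
	{
		char list[1024];
		ssize_t len, i;

		if (argc != 2)
			return 1;
		/* returns a packed sequence of NUL-terminated names */
		len = listxattr(argv[1], list, sizeof(list));
		if (len < 0)
			return 1;
		for (i = 0; i < len; i += strlen(list + i) + 1)
			printf("%s\n", list + i);	/* e.g. "user.comment" */
		return 0;
	}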
+ */ + if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) + return -XFS_ERROR(EINVAL); + + dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL); + if (!kbuf) + goto out_dput; + + cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; + error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, + al_hreq.flags, cursor); + if (error) + goto out_kfree; + + if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen)) + error = -EFAULT; + + out_kfree: + kfree(kbuf); + out_dput: + dput(dentry); + return error; +} + +int +xfs_attrmulti_attr_get( + struct inode *inode, + unsigned char *name, + unsigned char __user *ubuf, + __uint32_t *len, + __uint32_t flags) +{ + unsigned char *kbuf; + int error = EFAULT; + + if (*len > XATTR_SIZE_MAX) + return EINVAL; + kbuf = kmalloc(*len, GFP_KERNEL); + if (!kbuf) + return ENOMEM; + + error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); + if (error) + goto out_kfree; + + if (copy_to_user(ubuf, kbuf, *len)) + error = EFAULT; + + out_kfree: + kfree(kbuf); + return error; +} + +int +xfs_attrmulti_attr_set( + struct inode *inode, + unsigned char *name, + const unsigned char __user *ubuf, + __uint32_t len, + __uint32_t flags) +{ + unsigned char *kbuf; + int error = EFAULT; + + if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) + return EPERM; + if (len > XATTR_SIZE_MAX) + return EINVAL; + + kbuf = memdup_user(ubuf, len); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); + + error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); + + return error; +} + +int +xfs_attrmulti_attr_remove( + struct inode *inode, + unsigned char *name, + __uint32_t flags) +{ + if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) + return EPERM; + return xfs_attr_remove(XFS_I(inode), name, flags); +} + +STATIC int +xfs_attrmulti_by_handle( + struct file *parfilp, + void __user *arg) +{ + int error; + xfs_attr_multiop_t *ops; + xfs_fsop_attrmulti_handlereq_t am_hreq; + struct dentry *dentry; + unsigned int i, size; + unsigned char *attr_name; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) + return -XFS_ERROR(EFAULT); + + /* overflow check */ + if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t)) + return -E2BIG; + + dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + error = E2BIG; + size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); + if (!size || size > 16 * PAGE_SIZE) + goto out_dput; + + ops = memdup_user(am_hreq.ops, size); + if (IS_ERR(ops)) { + error = PTR_ERR(ops); + goto out_dput; + } + + attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); + if (!attr_name) + goto out_kfree_ops; + + error = 0; + for (i = 0; i < am_hreq.opcount; i++) { + ops[i].am_error = strncpy_from_user((char *)attr_name, + ops[i].am_attrname, MAXNAMELEN); + if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) + error = -ERANGE; + if (ops[i].am_error < 0) + break; + + switch (ops[i].am_opcode) { + case ATTR_OP_GET: + ops[i].am_error = xfs_attrmulti_attr_get( + dentry->d_inode, attr_name, + ops[i].am_attrvalue, &ops[i].am_length, + ops[i].am_flags); + break; + case ATTR_OP_SET: + ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); + if (ops[i].am_error) + break; + ops[i].am_error = xfs_attrmulti_attr_set( + dentry->d_inode, attr_name, + ops[i].am_attrvalue, ops[i].am_length, + ops[i].am_flags); + mnt_drop_write(parfilp->f_path.mnt); + break; + case ATTR_OP_REMOVE: 
+ ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); + if (ops[i].am_error) + break; + ops[i].am_error = xfs_attrmulti_attr_remove( + dentry->d_inode, attr_name, + ops[i].am_flags); + mnt_drop_write(parfilp->f_path.mnt); + break; + default: + ops[i].am_error = EINVAL; + } + } + + if (copy_to_user(am_hreq.ops, ops, size)) + error = XFS_ERROR(EFAULT); + + kfree(attr_name); + out_kfree_ops: + kfree(ops); + out_dput: + dput(dentry); + return -error; +} + +int +xfs_ioc_space( + struct xfs_inode *ip, + struct inode *inode, + struct file *filp, + int ioflags, + unsigned int cmd, + xfs_flock64_t *bf) +{ + int attr_flags = 0; + int error; + + /* + * Only allow the sys admin to reserve space unless + * unwritten extents are enabled. + */ + if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && + !capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + + if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) + return -XFS_ERROR(EPERM); + + if (!(filp->f_mode & FMODE_WRITE)) + return -XFS_ERROR(EBADF); + + if (!S_ISREG(inode->i_mode)) + return -XFS_ERROR(EINVAL); + + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + attr_flags |= XFS_ATTR_NONBLOCK; + + if (filp->f_flags & O_DSYNC) + attr_flags |= XFS_ATTR_SYNC; + + if (ioflags & IO_INVIS) + attr_flags |= XFS_ATTR_DMI; + + error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags); + return -error; +} + +STATIC int +xfs_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + void __user *arg) +{ + xfs_fsop_bulkreq_t bulkreq; + int count; /* # of records returned */ + xfs_ino_t inlast; /* last inode number */ + int done; + int error; + + /* done = 1 if there are more stats to get and if bulkstat */ + /* should be called again (unused here, but used in dmapi) */ + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + if (copy_from_user(&bulkreq, arg, sizeof(xfs_fsop_bulkreq_t))) + return -XFS_ERROR(EFAULT); + + if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) + return -XFS_ERROR(EFAULT); + + if ((count = bulkreq.icount) <= 0) + return -XFS_ERROR(EINVAL); + + if (bulkreq.ubuffer == NULL) + return -XFS_ERROR(EINVAL); + + if (cmd == XFS_IOC_FSINUMBERS) + error = xfs_inumbers(mp, &inlast, &count, + bulkreq.ubuffer, xfs_inumbers_fmt); + else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) + error = xfs_bulkstat_single(mp, &inlast, + bulkreq.ubuffer, &done); + else /* XFS_IOC_FSBULKSTAT */ + error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, + sizeof(xfs_bstat_t), bulkreq.ubuffer, + &done); + + if (error) + return -error; + + if (bulkreq.ocount != NULL) { + if (copy_to_user(bulkreq.lastip, &inlast, + sizeof(xfs_ino_t))) + return -XFS_ERROR(EFAULT); + + if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) + return -XFS_ERROR(EFAULT); + } + + return 0; +} + +STATIC int +xfs_ioc_fsgeometry_v1( + xfs_mount_t *mp, + void __user *arg) +{ + xfs_fsop_geom_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, &fsgeo, 3); + if (error) + return -error; + + /* + * Caller should have passed an argument of type + * xfs_fsop_geom_v1_t. This is a proper subset of the + * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 
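The v1 geometry call works because the later structure only appends fields, so a v1 reply is a plain prefix copy of the full structure. A sketch of that versioned-ABI pattern with made-up struct names (the real layouts live in xfs_fs.h):

	#include <stdio.h>
	#include <string.h>

	/* v2 extends v1 strictly by appending members */
	struct geom_v1 { int blocksize; int agcount; };
	struct geom_v2 { int blocksize; int agcount; int logsectsize; };

	int main(void)
	{
		struct geom_v2 full = { 4096, 16, 512 };
		struct geom_v1 reply;

		/* what the _v1 ioctl does: copy only sizeof(v1) bytes */
		memcpy(&reply, &full, sizeof(reply));
		printf("%d %d\n", reply.blocksize, reply.agcount);
		return 0;
	}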
+ */ + if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_ioc_fsgeometry( + xfs_mount_t *mp, + void __user *arg) +{ + xfs_fsop_geom_t fsgeo; + int error; + + error = xfs_fs_geometry(mp, &fsgeo, 4); + if (error) + return -error; + + if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) + return -XFS_ERROR(EFAULT); + return 0; +} + +/* + * Linux extended inode flags interface. + */ + +STATIC unsigned int +xfs_merge_ioc_xflags( + unsigned int flags, + unsigned int start) +{ + unsigned int xflags = start; + + if (flags & FS_IMMUTABLE_FL) + xflags |= XFS_XFLAG_IMMUTABLE; + else + xflags &= ~XFS_XFLAG_IMMUTABLE; + if (flags & FS_APPEND_FL) + xflags |= XFS_XFLAG_APPEND; + else + xflags &= ~XFS_XFLAG_APPEND; + if (flags & FS_SYNC_FL) + xflags |= XFS_XFLAG_SYNC; + else + xflags &= ~XFS_XFLAG_SYNC; + if (flags & FS_NOATIME_FL) + xflags |= XFS_XFLAG_NOATIME; + else + xflags &= ~XFS_XFLAG_NOATIME; + if (flags & FS_NODUMP_FL) + xflags |= XFS_XFLAG_NODUMP; + else + xflags &= ~XFS_XFLAG_NODUMP; + + return xflags; +} + +STATIC unsigned int +xfs_di2lxflags( + __uint16_t di_flags) +{ + unsigned int flags = 0; + + if (di_flags & XFS_DIFLAG_IMMUTABLE) + flags |= FS_IMMUTABLE_FL; + if (di_flags & XFS_DIFLAG_APPEND) + flags |= FS_APPEND_FL; + if (di_flags & XFS_DIFLAG_SYNC) + flags |= FS_SYNC_FL; + if (di_flags & XFS_DIFLAG_NOATIME) + flags |= FS_NOATIME_FL; + if (di_flags & XFS_DIFLAG_NODUMP) + flags |= FS_NODUMP_FL; + return flags; +} + +STATIC int +xfs_ioc_fsgetxattr( + xfs_inode_t *ip, + int attr, + void __user *arg) +{ + struct fsxattr fa; + + memset(&fa, 0, sizeof(struct fsxattr)); + + xfs_ilock(ip, XFS_ILOCK_SHARED); + fa.fsx_xflags = xfs_ip2xflags(ip); + fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; + fa.fsx_projid = xfs_get_projid(ip); + + if (attr) { + if (ip->i_afp) { + if (ip->i_afp->if_flags & XFS_IFEXTENTS) + fa.fsx_nextents = ip->i_afp->if_bytes / + sizeof(xfs_bmbt_rec_t); + else + fa.fsx_nextents = ip->i_d.di_anextents; + } else + fa.fsx_nextents = 0; + } else { + if (ip->i_df.if_flags & XFS_IFEXTENTS) + fa.fsx_nextents = ip->i_df.if_bytes / + sizeof(xfs_bmbt_rec_t); + else + fa.fsx_nextents = ip->i_d.di_nextents; + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (copy_to_user(arg, &fa, sizeof(fa))) + return -EFAULT; + return 0; +} + +STATIC void +xfs_set_diflags( + struct xfs_inode *ip, + unsigned int xflags) +{ + unsigned int di_flags; + + /* can't set PREALLOC this way, just preserve it */ + di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC); + if (xflags & XFS_XFLAG_IMMUTABLE) + di_flags |= XFS_DIFLAG_IMMUTABLE; + if (xflags & XFS_XFLAG_APPEND) + di_flags |= XFS_DIFLAG_APPEND; + if (xflags & XFS_XFLAG_SYNC) + di_flags |= XFS_DIFLAG_SYNC; + if (xflags & XFS_XFLAG_NOATIME) + di_flags |= XFS_DIFLAG_NOATIME; + if (xflags & XFS_XFLAG_NODUMP) + di_flags |= XFS_DIFLAG_NODUMP; + if (xflags & XFS_XFLAG_PROJINHERIT) + di_flags |= XFS_DIFLAG_PROJINHERIT; + if (xflags & XFS_XFLAG_NODEFRAG) + di_flags |= XFS_DIFLAG_NODEFRAG; + if (xflags & XFS_XFLAG_FILESTREAM) + di_flags |= XFS_DIFLAG_FILESTREAM; + if (S_ISDIR(ip->i_d.di_mode)) { + if (xflags & XFS_XFLAG_RTINHERIT) + di_flags |= XFS_DIFLAG_RTINHERIT; + if (xflags & XFS_XFLAG_NOSYMLINKS) + di_flags |= XFS_DIFLAG_NOSYMLINKS; + if (xflags & XFS_XFLAG_EXTSZINHERIT) + di_flags |= XFS_DIFLAG_EXTSZINHERIT; + } else if (S_ISREG(ip->i_d.di_mode)) { + if (xflags & XFS_XFLAG_REALTIME) + di_flags |= XFS_DIFLAG_REALTIME; + if (xflags & XFS_XFLAG_EXTSIZE) + di_flags |= 
XFS_DIFLAG_EXTSIZE; + } + + ip->i_d.di_flags = di_flags; +} + +STATIC void +xfs_diflags_to_linux( + struct xfs_inode *ip) +{ + struct inode *inode = VFS_I(ip); + unsigned int xflags = xfs_ip2xflags(ip); + + if (xflags & XFS_XFLAG_IMMUTABLE) + inode->i_flags |= S_IMMUTABLE; + else + inode->i_flags &= ~S_IMMUTABLE; + if (xflags & XFS_XFLAG_APPEND) + inode->i_flags |= S_APPEND; + else + inode->i_flags &= ~S_APPEND; + if (xflags & XFS_XFLAG_SYNC) + inode->i_flags |= S_SYNC; + else + inode->i_flags &= ~S_SYNC; + if (xflags & XFS_XFLAG_NOATIME) + inode->i_flags |= S_NOATIME; + else + inode->i_flags &= ~S_NOATIME; +} + +#define FSX_PROJID 1 +#define FSX_EXTSIZE 2 +#define FSX_XFLAGS 4 +#define FSX_NONBLOCK 8 + +STATIC int +xfs_ioctl_setattr( + xfs_inode_t *ip, + struct fsxattr *fa, + int mask) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + unsigned int lock_flags = 0; + struct xfs_dquot *udqp = NULL; + struct xfs_dquot *gdqp = NULL; + struct xfs_dquot *olddquot = NULL; + int code; + + trace_xfs_ioctl_setattr(ip); + + if (mp->m_flags & XFS_MOUNT_RDONLY) + return XFS_ERROR(EROFS); + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + /* + * Disallow 32bit project ids when projid32bit feature is not enabled. + */ + if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) && + !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) + return XFS_ERROR(EINVAL); + + /* + * If disk quotas is on, we make sure that the dquots do exist on disk, + * before we start any other transactions. Trying to do this later + * is messy. We don't care to take a readlock to look at the ids + * in inode here, because we can't hold it across the trans_reserve. + * If the IDs do change before we take the ilock, we're covered + * because the i_*dquot fields will get updated anyway. + */ + if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) { + code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid, + ip->i_d.di_gid, fa->fsx_projid, + XFS_QMOPT_PQUOTA, &udqp, &gdqp); + if (code) + return code; + } + + /* + * For the other attributes, we acquire the inode lock and + * first do an error checking pass. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); + if (code) + goto error_return; + + lock_flags = XFS_ILOCK_EXCL; + xfs_ilock(ip, lock_flags); + + /* + * CAP_FOWNER overrides the following restrictions: + * + * The user ID of the calling process must be equal + * to the file owner ID, except in cases where the + * CAP_FSETID capability is applicable. + */ + if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + + /* + * Do a quota reservation only if projid is actually going to change. + */ + if (mask & FSX_PROJID) { + if (XFS_IS_QUOTA_RUNNING(mp) && + XFS_IS_PQUOTA_ON(mp) && + xfs_get_projid(ip) != fa->fsx_projid) { + ASSERT(tp); + code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, + capable(CAP_FOWNER) ? + XFS_QMOPT_FORCE_RES : 0); + if (code) /* out of quota */ + goto error_return; + } + } + + if (mask & FSX_EXTSIZE) { + /* + * Can't change extent size if any extents are allocated. + */ + if (ip->i_d.di_nextents && + ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != + fa->fsx_extsize)) { + code = XFS_ERROR(EINVAL); /* EFBIG? */ + goto error_return; + } + + /* + * Extent size must be a multiple of the appropriate block + * size, if set at all. It must also be smaller than the + * maximum extent size supported by the filesystem. 
+ * + * Also, for non-realtime files, limit the extent size hint to + * half the size of the AGs in the filesystem so alignment + * doesn't result in extents larger than an AG. + */ + if (fa->fsx_extsize != 0) { + xfs_extlen_t size; + xfs_fsblock_t extsize_fsb; + + extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); + if (extsize_fsb > MAXEXTLEN) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + + if (XFS_IS_REALTIME_INODE(ip) || + ((mask & FSX_XFLAGS) && + (fa->fsx_xflags & XFS_XFLAG_REALTIME))) { + size = mp->m_sb.sb_rextsize << + mp->m_sb.sb_blocklog; + } else { + size = mp->m_sb.sb_blocksize; + if (extsize_fsb > mp->m_sb.sb_agblocks / 2) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + + if (fa->fsx_extsize % size) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + } + + + if (mask & FSX_XFLAGS) { + /* + * Can't change realtime flag if any extents are allocated. + */ + if ((ip->i_d.di_nextents || ip->i_delayed_blks) && + (XFS_IS_REALTIME_INODE(ip)) != + (fa->fsx_xflags & XFS_XFLAG_REALTIME)) { + code = XFS_ERROR(EINVAL); /* EFBIG? */ + goto error_return; + } + + /* + * If realtime flag is set then must have realtime data. + */ + if ((fa->fsx_xflags & XFS_XFLAG_REALTIME)) { + if ((mp->m_sb.sb_rblocks == 0) || + (mp->m_sb.sb_rextsize == 0) || + (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { + code = XFS_ERROR(EINVAL); + goto error_return; + } + } + + /* + * Can't modify an immutable/append-only file unless + * we have appropriate permission. + */ + if ((ip->i_d.di_flags & + (XFS_DIFLAG_IMMUTABLE|XFS_DIFLAG_APPEND) || + (fa->fsx_xflags & + (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && + !capable(CAP_LINUX_IMMUTABLE)) { + code = XFS_ERROR(EPERM); + goto error_return; + } + } + + xfs_trans_ijoin(tp, ip); + + /* + * Change file ownership. Must be the owner or privileged. + */ + if (mask & FSX_PROJID) { + /* + * CAP_FSETID overrides the following restrictions: + * + * The set-user-ID and set-group-ID bits of a file will be + * cleared upon successful return from chown() + */ + if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && + !capable(CAP_FSETID)) + ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); + + /* + * Change the ownerships and register quota modifications + * in the transaction. + */ + if (xfs_get_projid(ip) != fa->fsx_projid) { + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) { + olddquot = xfs_qm_vop_chown(tp, ip, + &ip->i_gdquot, gdqp); + } + xfs_set_projid(ip, fa->fsx_projid); + + /* + * We may have to rev the inode as well as + * the superblock version number since projids didn't + * exist before DINODE_VERSION_2 and SB_VERSION_NLINK. + */ + if (ip->i_d.di_version == 1) + xfs_bump_ino_vers2(tp, ip); + } + + } + + if (mask & FSX_EXTSIZE) + ip->i_d.di_extsize = fa->fsx_extsize >> mp->m_sb.sb_blocklog; + if (mask & FSX_XFLAGS) { + xfs_set_diflags(ip, fa->fsx_xflags); + xfs_diflags_to_linux(ip); + } + + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + XFS_STATS_INC(xs_ig_attrchg); + + /* + * If this is a synchronous mount, make sure that the + * transaction goes to disk before returning to the user. + * This is slightly sub-optimal in that truncates require + * two sync transactions instead of one for wsync filesystems. + * One for the truncate and one for the timestamps since we + * don't want to change the timestamps unless we're sure the + * truncate worked. Truncates are less than 1% of the laddis + * mix so this probably isn't worth the trouble to optimize. 
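Throughout this file the core routines keep errors as positive errno values and the VFS-facing entry points negate them on the way out, which is why "return -error;" and "return -XFS_ERROR(E...);" recur here. A sketch of the convention (function names are illustrative):

	#include <errno.h>
	#include <stdio.h>

	/* core style: 0 on success, positive errno on failure */
	static int core_op(int fail)
	{
		return fail ? EINVAL : 0;
	}

	/* VFS style: 0 on success, negative errno on failure */
	static long vfs_op(int fail)
	{
		return -core_op(fail);
	}

	int main(void)
	{
		printf("%ld\n", vfs_op(1));	/* -22 on Linux (EINVAL) */
		return 0;
	}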
+ */ + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + code = xfs_trans_commit(tp, 0); + xfs_iunlock(ip, lock_flags); + + /* + * Release any dquot(s) the inode had kept before chown. + */ + xfs_qm_dqrele(olddquot); + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); + + return code; + + error_return: + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); + xfs_trans_cancel(tp, 0); + if (lock_flags) + xfs_iunlock(ip, lock_flags); + return code; +} + +STATIC int +xfs_ioc_fssetxattr( + xfs_inode_t *ip, + struct file *filp, + void __user *arg) +{ + struct fsxattr fa; + unsigned int mask; + + if (copy_from_user(&fa, arg, sizeof(fa))) + return -EFAULT; + + mask = FSX_XFLAGS | FSX_EXTSIZE | FSX_PROJID; + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + mask |= FSX_NONBLOCK; + + return -xfs_ioctl_setattr(ip, &fa, mask); +} + +STATIC int +xfs_ioc_getxflags( + xfs_inode_t *ip, + void __user *arg) +{ + unsigned int flags; + + flags = xfs_di2lxflags(ip->i_d.di_flags); + if (copy_to_user(arg, &flags, sizeof(flags))) + return -EFAULT; + return 0; +} + +STATIC int +xfs_ioc_setxflags( + xfs_inode_t *ip, + struct file *filp, + void __user *arg) +{ + struct fsxattr fa; + unsigned int flags; + unsigned int mask; + + if (copy_from_user(&flags, arg, sizeof(flags))) + return -EFAULT; + + if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ + FS_NOATIME_FL | FS_NODUMP_FL | \ + FS_SYNC_FL)) + return -EOPNOTSUPP; + + mask = FSX_XFLAGS; + if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) + mask |= FSX_NONBLOCK; + fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); + + return -xfs_ioctl_setattr(ip, &fa, mask); +} + +STATIC int +xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full) +{ + struct getbmap __user *base = *ap; + + /* copy only getbmap portion (not getbmapx) */ + if (copy_to_user(base, bmv, sizeof(struct getbmap))) + return XFS_ERROR(EFAULT); + + *ap += sizeof(struct getbmap); + return 0; +} + +STATIC int +xfs_ioc_getbmap( + struct xfs_inode *ip, + int ioflags, + unsigned int cmd, + void __user *arg) +{ + struct getbmapx bmx; + int error; + + if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) + return -XFS_ERROR(EFAULT); + + if (bmx.bmv_count < 2) + return -XFS_ERROR(EINVAL); + + bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); + if (ioflags & IO_INVIS) + bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ; + + error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, + (struct getbmap *)arg+1); + if (error) + return -error; + + /* copy back header - only size of getbmap */ + if (copy_to_user(arg, &bmx, sizeof(struct getbmap))) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full) +{ + struct getbmapx __user *base = *ap; + + if (copy_to_user(base, bmv, sizeof(struct getbmapx))) + return XFS_ERROR(EFAULT); + + *ap += sizeof(struct getbmapx); + return 0; +} + +STATIC int +xfs_ioc_getbmapx( + struct xfs_inode *ip, + void __user *arg) +{ + struct getbmapx bmx; + int error; + + if (copy_from_user(&bmx, arg, sizeof(bmx))) + return -XFS_ERROR(EFAULT); + + if (bmx.bmv_count < 2) + return -XFS_ERROR(EINVAL); + + if (bmx.bmv_iflags & (~BMV_IF_VALID)) + return -XFS_ERROR(EINVAL); + + error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, + (struct getbmapx *)arg+1); + if (error) + return -error; + + /* copy back header */ + if (copy_to_user(arg, &bmx, sizeof(struct getbmapx))) + return -XFS_ERROR(EFAULT); + + return 0; +} + +/* + * Note: some of the ioctl's return positive numbers as a + * byte count indicating success, such as readlink_by_handle. 
+ * So we don't "sign flip" like most other routines. This means + * true errors need to be returned as a negative value. + */ +long +xfs_file_ioctl( + struct file *filp, + unsigned int cmd, + unsigned long p) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + void __user *arg = (void __user *)p; + int ioflags = 0; + int error; + + if (filp->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; + + trace_xfs_file_ioctl(ip); + + switch (cmd) { + case FITRIM: + return xfs_ioc_trim(mp, arg); + case XFS_IOC_ALLOCSP: + case XFS_IOC_FREESP: + case XFS_IOC_RESVSP: + case XFS_IOC_UNRESVSP: + case XFS_IOC_ALLOCSP64: + case XFS_IOC_FREESP64: + case XFS_IOC_RESVSP64: + case XFS_IOC_UNRESVSP64: + case XFS_IOC_ZERO_RANGE: { + xfs_flock64_t bf; + + if (copy_from_user(&bf, arg, sizeof(bf))) + return -XFS_ERROR(EFAULT); + return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); + } + case XFS_IOC_DIOINFO: { + struct dioattr da; + xfs_buftarg_t *target = + XFS_IS_REALTIME_INODE(ip) ? + mp->m_rtdev_targp : mp->m_ddev_targp; + + da.d_mem = da.d_miniosz = 1 << target->bt_sshift; + da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); + + if (copy_to_user(arg, &da, sizeof(da))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_FSBULKSTAT_SINGLE: + case XFS_IOC_FSBULKSTAT: + case XFS_IOC_FSINUMBERS: + return xfs_ioc_bulkstat(mp, cmd, arg); + + case XFS_IOC_FSGEOMETRY_V1: + return xfs_ioc_fsgeometry_v1(mp, arg); + + case XFS_IOC_FSGEOMETRY: + return xfs_ioc_fsgeometry(mp, arg); + + case XFS_IOC_GETVERSION: + return put_user(inode->i_generation, (int __user *)arg); + + case XFS_IOC_FSGETXATTR: + return xfs_ioc_fsgetxattr(ip, 0, arg); + case XFS_IOC_FSGETXATTRA: + return xfs_ioc_fsgetxattr(ip, 1, arg); + case XFS_IOC_FSSETXATTR: + return xfs_ioc_fssetxattr(ip, filp, arg); + case XFS_IOC_GETXFLAGS: + return xfs_ioc_getxflags(ip, arg); + case XFS_IOC_SETXFLAGS: + return xfs_ioc_setxflags(ip, filp, arg); + + case XFS_IOC_FSSETDM: { + struct fsdmidata dmi; + + if (copy_from_user(&dmi, arg, sizeof(dmi))) + return -XFS_ERROR(EFAULT); + + error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, + dmi.fsd_dmstate); + return -error; + } + + case XFS_IOC_GETBMAP: + case XFS_IOC_GETBMAPA: + return xfs_ioc_getbmap(ip, ioflags, cmd, arg); + + case XFS_IOC_GETBMAPX: + return xfs_ioc_getbmapx(ip, arg); + + case XFS_IOC_FD_TO_HANDLE: + case XFS_IOC_PATH_TO_HANDLE: + case XFS_IOC_PATH_TO_FSHANDLE: { + xfs_fsop_handlereq_t hreq; + + if (copy_from_user(&hreq, arg, sizeof(hreq))) + return -XFS_ERROR(EFAULT); + return xfs_find_handle(cmd, &hreq); + } + case XFS_IOC_OPEN_BY_HANDLE: { + xfs_fsop_handlereq_t hreq; + + if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) + return -XFS_ERROR(EFAULT); + return xfs_open_by_handle(filp, &hreq); + } + case XFS_IOC_FSSETDM_BY_HANDLE: + return xfs_fssetdm_by_handle(filp, arg); + + case XFS_IOC_READLINK_BY_HANDLE: { + xfs_fsop_handlereq_t hreq; + + if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) + return -XFS_ERROR(EFAULT); + return xfs_readlink_by_handle(filp, &hreq); + } + case XFS_IOC_ATTRLIST_BY_HANDLE: + return xfs_attrlist_by_handle(filp, arg); + + case XFS_IOC_ATTRMULTI_BY_HANDLE: + return xfs_attrmulti_by_handle(filp, arg); + + case XFS_IOC_SWAPEXT: { + struct xfs_swapext sxp; + + if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) + return -XFS_ERROR(EFAULT); + error = xfs_swapext(&sxp); + return -error; + } + + case XFS_IOC_FSCOUNTS: { + xfs_fsop_counts_t out; + + error = xfs_fs_counts(mp, 
&out); + if (error) + return -error; + + if (copy_to_user(arg, &out, sizeof(out))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_SET_RESBLKS: { + xfs_fsop_resblks_t inout; + __uint64_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (mp->m_flags & XFS_MOUNT_RDONLY) + return -XFS_ERROR(EROFS); + + if (copy_from_user(&inout, arg, sizeof(inout))) + return -XFS_ERROR(EFAULT); + + /* input parameter is passed in resblks field of structure */ + in = inout.resblks; + error = xfs_reserve_blocks(mp, &in, &inout); + if (error) + return -error; + + if (copy_to_user(arg, &inout, sizeof(inout))) + return -XFS_ERROR(EFAULT); + return 0; + } + + case XFS_IOC_GET_RESBLKS: { + xfs_fsop_resblks_t out; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + error = xfs_reserve_blocks(mp, NULL, &out); + if (error) + return -error; + + if (copy_to_user(arg, &out, sizeof(out))) + return -XFS_ERROR(EFAULT); + + return 0; + } + + case XFS_IOC_FSGROWFSDATA: { + xfs_growfs_data_t in; + + if (copy_from_user(&in, arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_data(mp, &in); + return -error; + } + + case XFS_IOC_FSGROWFSLOG: { + xfs_growfs_log_t in; + + if (copy_from_user(&in, arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_log(mp, &in); + return -error; + } + + case XFS_IOC_FSGROWFSRT: { + xfs_growfs_rt_t in; + + if (copy_from_user(&in, arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_growfs_rt(mp, &in); + return -error; + } + + case XFS_IOC_GOINGDOWN: { + __uint32_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (get_user(in, (__uint32_t __user *)arg)) + return -XFS_ERROR(EFAULT); + + error = xfs_fs_goingdown(mp, in); + return -error; + } + + case XFS_IOC_ERROR_INJECTION: { + xfs_error_injection_t in; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&in, arg, sizeof(in))) + return -XFS_ERROR(EFAULT); + + error = xfs_errortag_add(in.errtag, mp); + return -error; + } + + case XFS_IOC_ERROR_CLEARALL: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + error = xfs_errortag_clearall(mp, 1); + return -error; + + default: + return -ENOTTY; + } +} diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h new file mode 100644 index 0000000..d56173b --- /dev/null +++ b/fs/xfs/xfs_ioctl.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_IOCTL_H__ +#define __XFS_IOCTL_H__ + +extern int +xfs_ioc_space( + struct xfs_inode *ip, + struct inode *inode, + struct file *filp, + int ioflags, + unsigned int cmd, + xfs_flock64_t *bf); + +extern int +xfs_find_handle( + unsigned int cmd, + xfs_fsop_handlereq_t *hreq); + +extern int +xfs_open_by_handle( + struct file *parfilp, + xfs_fsop_handlereq_t *hreq); + +extern int +xfs_readlink_by_handle( + struct file *parfilp, + xfs_fsop_handlereq_t *hreq); + +extern int +xfs_attrmulti_attr_get( + struct inode *inode, + unsigned char *name, + unsigned char __user *ubuf, + __uint32_t *len, + __uint32_t flags); + +extern int +xfs_attrmulti_attr_set( + struct inode *inode, + unsigned char *name, + const unsigned char __user *ubuf, + __uint32_t len, + __uint32_t flags); + +extern int +xfs_attrmulti_attr_remove( + struct inode *inode, + unsigned char *name, + __uint32_t flags); + +extern struct dentry * +xfs_handle_to_dentry( + struct file *parfilp, + void __user *uhandle, + u32 hlen); + +extern long +xfs_file_ioctl( + struct file *filp, + unsigned int cmd, + unsigned long p); + +extern long +xfs_file_compat_ioctl( + struct file *file, + unsigned int cmd, + unsigned long arg); + +#endif diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c new file mode 100644 index 0000000..54e623b --- /dev/null +++ b/fs/xfs/xfs_ioctl32.c @@ -0,0 +1,672 @@ +/* + * Copyright (c) 2004-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+#include <linux/mount.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_vnode.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_itable.h"
+#include "xfs_error.h"
+#include "xfs_dfrag.h"
+#include "xfs_vnodeops.h"
+#include "xfs_fsops.h"
+#include "xfs_alloc.h"
+#include "xfs_rtalloc.h"
+#include "xfs_attr.h"
+#include "xfs_ioctl.h"
+#include "xfs_ioctl32.h"
+#include "xfs_trace.h"
+
+#define  _NATIVE_IOC(cmd, type) \
+	  _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
+
+#ifdef BROKEN_X86_ALIGNMENT
+STATIC int
+xfs_compat_flock64_copyin(
+	xfs_flock64_t		*bf,
+	compat_xfs_flock64_t	__user *arg32)
+{
+	if (get_user(bf->l_type,	&arg32->l_type) ||
+	    get_user(bf->l_whence,	&arg32->l_whence) ||
+	    get_user(bf->l_start,	&arg32->l_start) ||
+	    get_user(bf->l_len,		&arg32->l_len) ||
+	    get_user(bf->l_sysid,	&arg32->l_sysid) ||
+	    get_user(bf->l_pid,		&arg32->l_pid) ||
+	    copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+STATIC int
+xfs_compat_ioc_fsgeometry_v1(
+	struct xfs_mount	  *mp,
+	compat_xfs_fsop_geom_v1_t __user *arg32)
+{
+	xfs_fsop_geom_t		  fsgeo;
+	int			  error;
+
+	error = xfs_fs_geometry(mp, &fsgeo, 3);
+	if (error)
+		return -error;
+	/* The 32-bit variant simply has some padding at the end */
+	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+STATIC int
+xfs_compat_growfs_data_copyin(
+	struct xfs_growfs_data	 *in,
+	compat_xfs_growfs_data_t __user *arg32)
+{
+	if (get_user(in->newblocks, &arg32->newblocks) ||
+	    get_user(in->imaxpct, &arg32->imaxpct))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+STATIC int
+xfs_compat_growfs_rt_copyin(
+	struct xfs_growfs_rt	 *in,
+	compat_xfs_growfs_rt_t	__user *arg32)
+{
+	if (get_user(in->newblocks, &arg32->newblocks) ||
+	    get_user(in->extsize, &arg32->extsize))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+STATIC int
+xfs_inumbers_fmt_compat(
+	void			__user *ubuffer,
+	const xfs_inogrp_t	*buffer,
+	long			count,
+	long			*written)
+{
+	compat_xfs_inogrp_t	__user *p32 = ubuffer;
+	long			i;
+
+	for (i = 0; i < count; i++) {
+		if (put_user(buffer[i].xi_startino,   &p32[i].xi_startino) ||
+		    put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
+		    put_user(buffer[i].xi_allocmask,  &p32[i].xi_allocmask))
+			return -XFS_ERROR(EFAULT);
+	}
+	*written = count * sizeof(*p32);
+	return 0;
+}
+
+#else
+#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
+#endif	/* BROKEN_X86_ALIGNMENT */
+
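An aside on why BROKEN_X86_ALIGNMENT exists, since the copyin helpers above can look like pure boilerplate: i386 aligns 64-bit members to 4 bytes while x86_64 aligns them to 8, so a structure can keep identical field sizes on both ABIs and still place them at different offsets. A small stand-alone sketch, with invented struct names rather than anything from XFS, makes the skew visible:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Invented example: built as 64-bit code, 'val' sits at offset 8 in the
 * native struct but at offset 4 in the packed mirror, which is where a
 * 32-bit caller actually wrote it.  The per-field get_user() copyins
 * above read through exactly such packed mirrors.
 */
struct native_args {
	int32_t	type;
	int64_t	val;
};

struct compat_args {
	int32_t	type;
	int64_t	val;
} __attribute__((packed));

int main(void)
{
	printf("native offsetof(val) = %zu\n", offsetof(struct native_args, val));
	printf("compat offsetof(val) = %zu\n", offsetof(struct compat_args, val));
	return 0;
}

A plain copy_from_user() of the native struct would misread every field after the first 64-bit member, hence the field-by-field transfers.
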
+STATIC int
+xfs_ioctl32_bstime_copyin(
+	xfs_bstime_t		*bstime,
+	compat_xfs_bstime_t	__user *bstime32)
+{
+	compat_time_t		sec32;	/* tv_sec differs on 64 vs. 32 */
+
+	if (get_user(sec32,		&bstime32->tv_sec)	||
+	    get_user(bstime->tv_nsec,	&bstime32->tv_nsec))
+		return -XFS_ERROR(EFAULT);
+	bstime->tv_sec = sec32;
+	return 0;
+}
+
+/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */
+STATIC int
+xfs_ioctl32_bstat_copyin(
+	xfs_bstat_t		*bstat,
+	compat_xfs_bstat_t	__user *bstat32)
+{
+	if (get_user(bstat->bs_ino,	&bstat32->bs_ino)	||
+	    get_user(bstat->bs_mode,	&bstat32->bs_mode)	||
+	    get_user(bstat->bs_nlink,	&bstat32->bs_nlink)	||
+	    get_user(bstat->bs_uid,	&bstat32->bs_uid)	||
+	    get_user(bstat->bs_gid,	&bstat32->bs_gid)	||
+	    get_user(bstat->bs_rdev,	&bstat32->bs_rdev)	||
+	    get_user(bstat->bs_blksize,	&bstat32->bs_blksize)	||
+	    get_user(bstat->bs_size,	&bstat32->bs_size)	||
+	    xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) ||
+	    xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) ||
+	    xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) ||
+	    get_user(bstat->bs_blocks,	&bstat32->bs_blocks)	||
+	    get_user(bstat->bs_xflags,	&bstat32->bs_xflags)	||
+	    get_user(bstat->bs_extsize,	&bstat32->bs_extsize)	||
+	    get_user(bstat->bs_extents,	&bstat32->bs_extents)	||
+	    get_user(bstat->bs_gen,	&bstat32->bs_gen)	||
+	    get_user(bstat->bs_projid_lo, &bstat32->bs_projid_lo) ||
+	    get_user(bstat->bs_projid_hi, &bstat32->bs_projid_hi) ||
+	    get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask)	||
+	    get_user(bstat->bs_dmstate,	&bstat32->bs_dmstate)	||
+	    get_user(bstat->bs_aextents, &bstat32->bs_aextents))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+/* XFS_IOC_FSBULKSTAT and friends */
+
+STATIC int
+xfs_bstime_store_compat(
+	compat_xfs_bstime_t	__user *p32,
+	const xfs_bstime_t	*p)
+{
+	__s32			sec32;
+
+	sec32 = p->tv_sec;
+	if (put_user(sec32, &p32->tv_sec) ||
+	    put_user(p->tv_nsec, &p32->tv_nsec))
+		return -XFS_ERROR(EFAULT);
+	return 0;
+}
+
+/* Return 0 on success or positive error (to xfs_bulkstat()) */
+STATIC int
+xfs_bulkstat_one_fmt_compat(
+	void			__user *ubuffer,
+	int			ubsize,
+	int			*ubused,
+	const xfs_bstat_t	*buffer)
+{
+	compat_xfs_bstat_t	__user *p32 = ubuffer;
+
+	if (ubsize < sizeof(*p32))
+		return XFS_ERROR(ENOMEM);
+
+	if (put_user(buffer->bs_ino,	  &p32->bs_ino)		||
+	    put_user(buffer->bs_mode,	  &p32->bs_mode)	||
+	    put_user(buffer->bs_nlink,	  &p32->bs_nlink)	||
+	    put_user(buffer->bs_uid,	  &p32->bs_uid)		||
+	    put_user(buffer->bs_gid,	  &p32->bs_gid)		||
+	    put_user(buffer->bs_rdev,	  &p32->bs_rdev)	||
+	    put_user(buffer->bs_blksize,  &p32->bs_blksize)	||
+	    put_user(buffer->bs_size,	  &p32->bs_size)	||
+	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
+	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
+	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
+	    put_user(buffer->bs_blocks,	  &p32->bs_blocks)	||
+	    put_user(buffer->bs_xflags,	  &p32->bs_xflags)	||
+	    put_user(buffer->bs_extsize,  &p32->bs_extsize)	||
+	    put_user(buffer->bs_extents,  &p32->bs_extents)	||
+	    put_user(buffer->bs_gen,	  &p32->bs_gen)		||
+	    put_user(buffer->bs_projid,	  &p32->bs_projid)	||
+	    put_user(buffer->bs_projid_hi, &p32->bs_projid_hi)	||
+	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask)	||
+	    put_user(buffer->bs_dmstate,  &p32->bs_dmstate)	||
+	    put_user(buffer->bs_aextents, &p32->bs_aextents))
+		return XFS_ERROR(EFAULT);
+	if (ubused)
+		*ubused = sizeof(*p32);
+	return 0;
+}
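
The formatter above exists so that one generic walker can serve both ABIs: xfs_bulkstat() iterates inodes and calls back into whichever per-record formatter the caller passed in, native or compat. A stripped-down sketch of that callback shape, with invented names rather than the XFS API:

#include <stddef.h>

struct record {
	unsigned long long ino;
	long long          size;
};

/* formatter: encode one record into the caller's preferred layout */
typedef int (*record_fmt_t)(void *dst, size_t dstsize,
			    const struct record *rec);

static int walk_records(const struct record *recs, int nrecs,
			void *dst, size_t dstsize, record_fmt_t fmt)
{
	int i, err;

	for (i = 0; i < nrecs; i++) {
		/* a real walker would also advance the output cursor */
		err = fmt(dst, dstsize, &recs[i]);
		if (err)
			return err;
	}
	return 0;
}

The iteration logic never learns which layout it is producing; only the formatter changes between the native and the 32-bit path.

+
+STATIC int
+xfs_bulkstat_one_compat(
+	xfs_mount_t	*mp,		/* mount point for filesystem */
+	xfs_ino_t	ino,		/* inode number to get data for */
+	void		__user *buffer,	/* buffer to place output in */
+	int		ubsize,		/* size of buffer */
+	int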
*ubused, /* bytes used by me */ + int *stat) /* BULKSTAT_RV_... */ +{ + return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, + xfs_bulkstat_one_fmt_compat, + ubused, stat); +} + +/* copied from xfs_ioctl.c */ +STATIC int +xfs_compat_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + compat_xfs_fsop_bulkreq_t __user *p32) +{ + u32 addr; + xfs_fsop_bulkreq_t bulkreq; + int count; /* # of records returned */ + xfs_ino_t inlast; /* last inode number */ + int done; + int error; + + /* done = 1 if there are more stats to get and if bulkstat */ + /* should be called again (unused here, but used in dmapi) */ + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + if (get_user(addr, &p32->lastip)) + return -XFS_ERROR(EFAULT); + bulkreq.lastip = compat_ptr(addr); + if (get_user(bulkreq.icount, &p32->icount) || + get_user(addr, &p32->ubuffer)) + return -XFS_ERROR(EFAULT); + bulkreq.ubuffer = compat_ptr(addr); + if (get_user(addr, &p32->ocount)) + return -XFS_ERROR(EFAULT); + bulkreq.ocount = compat_ptr(addr); + + if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) + return -XFS_ERROR(EFAULT); + + if ((count = bulkreq.icount) <= 0) + return -XFS_ERROR(EINVAL); + + if (bulkreq.ubuffer == NULL) + return -XFS_ERROR(EINVAL); + + if (cmd == XFS_IOC_FSINUMBERS_32) { + error = xfs_inumbers(mp, &inlast, &count, + bulkreq.ubuffer, xfs_inumbers_fmt_compat); + } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) { + int res; + + error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, + sizeof(compat_xfs_bstat_t), 0, &res); + } else if (cmd == XFS_IOC_FSBULKSTAT_32) { + error = xfs_bulkstat(mp, &inlast, &count, + xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), + bulkreq.ubuffer, &done); + } else + error = XFS_ERROR(EINVAL); + if (error) + return -error; + + if (bulkreq.ocount != NULL) { + if (copy_to_user(bulkreq.lastip, &inlast, + sizeof(xfs_ino_t))) + return -XFS_ERROR(EFAULT); + + if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) + return -XFS_ERROR(EFAULT); + } + + return 0; +} + +STATIC int +xfs_compat_handlereq_copyin( + xfs_fsop_handlereq_t *hreq, + compat_xfs_fsop_handlereq_t __user *arg32) +{ + compat_xfs_fsop_handlereq_t hreq32; + + if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) + return -XFS_ERROR(EFAULT); + + hreq->fd = hreq32.fd; + hreq->path = compat_ptr(hreq32.path); + hreq->oflags = hreq32.oflags; + hreq->ihandle = compat_ptr(hreq32.ihandle); + hreq->ihandlen = hreq32.ihandlen; + hreq->ohandle = compat_ptr(hreq32.ohandle); + hreq->ohandlen = compat_ptr(hreq32.ohandlen); + + return 0; +} + +STATIC struct dentry * +xfs_compat_handlereq_to_dentry( + struct file *parfilp, + compat_xfs_fsop_handlereq_t *hreq) +{ + return xfs_handle_to_dentry(parfilp, + compat_ptr(hreq->ihandle), hreq->ihandlen); +} + +STATIC int +xfs_compat_attrlist_by_handle( + struct file *parfilp, + void __user *arg) +{ + int error; + attrlist_cursor_kern_t *cursor; + compat_xfs_fsop_attrlist_handlereq_t al_hreq; + struct dentry *dentry; + char *kbuf; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&al_hreq, arg, + sizeof(compat_xfs_fsop_attrlist_handlereq_t))) + return -XFS_ERROR(EFAULT); + if (al_hreq.buflen > XATTR_LIST_MAX) + return -XFS_ERROR(EINVAL); + + /* + * Reject flags, only allow namespaces. 
+ */ + if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) + return -XFS_ERROR(EINVAL); + + dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + error = -ENOMEM; + kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); + if (!kbuf) + goto out_dput; + + cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; + error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, + al_hreq.flags, cursor); + if (error) + goto out_kfree; + + if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen)) + error = -EFAULT; + + out_kfree: + kfree(kbuf); + out_dput: + dput(dentry); + return error; +} + +STATIC int +xfs_compat_attrmulti_by_handle( + struct file *parfilp, + void __user *arg) +{ + int error; + compat_xfs_attr_multiop_t *ops; + compat_xfs_fsop_attrmulti_handlereq_t am_hreq; + struct dentry *dentry; + unsigned int i, size; + unsigned char *attr_name; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&am_hreq, arg, + sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) + return -XFS_ERROR(EFAULT); + + /* overflow check */ + if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t)) + return -E2BIG; + + dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + error = E2BIG; + size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); + if (!size || size > 16 * PAGE_SIZE) + goto out_dput; + + ops = memdup_user(compat_ptr(am_hreq.ops), size); + if (IS_ERR(ops)) { + error = PTR_ERR(ops); + goto out_dput; + } + + attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); + if (!attr_name) + goto out_kfree_ops; + + error = 0; + for (i = 0; i < am_hreq.opcount; i++) { + ops[i].am_error = strncpy_from_user((char *)attr_name, + compat_ptr(ops[i].am_attrname), + MAXNAMELEN); + if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) + error = -ERANGE; + if (ops[i].am_error < 0) + break; + + switch (ops[i].am_opcode) { + case ATTR_OP_GET: + ops[i].am_error = xfs_attrmulti_attr_get( + dentry->d_inode, attr_name, + compat_ptr(ops[i].am_attrvalue), + &ops[i].am_length, ops[i].am_flags); + break; + case ATTR_OP_SET: + ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); + if (ops[i].am_error) + break; + ops[i].am_error = xfs_attrmulti_attr_set( + dentry->d_inode, attr_name, + compat_ptr(ops[i].am_attrvalue), + ops[i].am_length, ops[i].am_flags); + mnt_drop_write(parfilp->f_path.mnt); + break; + case ATTR_OP_REMOVE: + ops[i].am_error = mnt_want_write(parfilp->f_path.mnt); + if (ops[i].am_error) + break; + ops[i].am_error = xfs_attrmulti_attr_remove( + dentry->d_inode, attr_name, + ops[i].am_flags); + mnt_drop_write(parfilp->f_path.mnt); + break; + default: + ops[i].am_error = EINVAL; + } + } + + if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) + error = XFS_ERROR(EFAULT); + + kfree(attr_name); + out_kfree_ops: + kfree(ops); + out_dput: + dput(dentry); + return -error; +} + +STATIC int +xfs_compat_fssetdm_by_handle( + struct file *parfilp, + void __user *arg) +{ + int error; + struct fsdmidata fsd; + compat_xfs_fsop_setdm_handlereq_t dmhreq; + struct dentry *dentry; + + if (!capable(CAP_MKNOD)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&dmhreq, arg, + sizeof(compat_xfs_fsop_setdm_handlereq_t))) + return -XFS_ERROR(EFAULT); + + dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { + error = -XFS_ERROR(EPERM); + goto out; + } + + if 
(copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) {
+		error = -XFS_ERROR(EFAULT);
+		goto out;
+	}
+
+	error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
+				 fsd.fsd_dmstate);
+
+out:
+	dput(dentry);
+	return error;
+}
+
+long
+xfs_file_compat_ioctl(
+	struct file		*filp,
+	unsigned		cmd,
+	unsigned long		p)
+{
+	struct inode		*inode = filp->f_path.dentry->d_inode;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	void			__user *arg = (void __user *)p;
+	int			ioflags = 0;
+	int			error;
+
+	if (filp->f_mode & FMODE_NOCMTIME)
+		ioflags |= IO_INVIS;
+
+	trace_xfs_file_compat_ioctl(ip);
+
+	switch (cmd) {
+	/* No size or alignment issues on any arch */
+	case XFS_IOC_DIOINFO:
+	case XFS_IOC_FSGEOMETRY:
+	case XFS_IOC_FSGETXATTR:
+	case XFS_IOC_FSSETXATTR:
+	case XFS_IOC_FSGETXATTRA:
+	case XFS_IOC_FSSETDM:
+	case XFS_IOC_GETBMAP:
+	case XFS_IOC_GETBMAPA:
+	case XFS_IOC_GETBMAPX:
+	case XFS_IOC_FSCOUNTS:
+	case XFS_IOC_SET_RESBLKS:
+	case XFS_IOC_GET_RESBLKS:
+	case XFS_IOC_FSGROWFSLOG:
+	case XFS_IOC_GOINGDOWN:
+	case XFS_IOC_ERROR_INJECTION:
+	case XFS_IOC_ERROR_CLEARALL:
+		return xfs_file_ioctl(filp, cmd, p);
+#ifndef BROKEN_X86_ALIGNMENT
+	/* These are handled fine if no alignment issues */
+	case XFS_IOC_ALLOCSP:
+	case XFS_IOC_FREESP:
+	case XFS_IOC_RESVSP:
+	case XFS_IOC_UNRESVSP:
+	case XFS_IOC_ALLOCSP64:
+	case XFS_IOC_FREESP64:
+	case XFS_IOC_RESVSP64:
+	case XFS_IOC_UNRESVSP64:
+	case XFS_IOC_FSGEOMETRY_V1:
+	case XFS_IOC_FSGROWFSDATA:
+	case XFS_IOC_FSGROWFSRT:
+	case XFS_IOC_ZERO_RANGE:
+		return xfs_file_ioctl(filp, cmd, p);
+#else
+	case XFS_IOC_ALLOCSP_32:
+	case XFS_IOC_FREESP_32:
+	case XFS_IOC_ALLOCSP64_32:
+	case XFS_IOC_FREESP64_32:
+	case XFS_IOC_RESVSP_32:
+	case XFS_IOC_UNRESVSP_32:
+	case XFS_IOC_RESVSP64_32:
+	case XFS_IOC_UNRESVSP64_32:
+	case XFS_IOC_ZERO_RANGE_32: {
+		struct xfs_flock64	bf;
+
+		if (xfs_compat_flock64_copyin(&bf, arg))
+			return -XFS_ERROR(EFAULT);
+		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
+		return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
+	}
+	case XFS_IOC_FSGEOMETRY_V1_32:
+		return xfs_compat_ioc_fsgeometry_v1(mp, arg);
+	case XFS_IOC_FSGROWFSDATA_32: {
+		struct xfs_growfs_data	in;
+
+		if (xfs_compat_growfs_data_copyin(&in, arg))
+			return -XFS_ERROR(EFAULT);
+		error = xfs_growfs_data(mp, &in);
+		return -error;
+	}
+	case XFS_IOC_FSGROWFSRT_32: {
+		struct xfs_growfs_rt	in;
+
+		if (xfs_compat_growfs_rt_copyin(&in, arg))
+			return -XFS_ERROR(EFAULT);
+		error = xfs_growfs_rt(mp, &in);
+		return -error;
+	}
+#endif
+	/* long changes size, but xfs only copies out 32 bits */
+	case XFS_IOC_GETXFLAGS_32:
+	case XFS_IOC_SETXFLAGS_32:
+	case XFS_IOC_GETVERSION_32:
+		cmd = _NATIVE_IOC(cmd, long);
+		return xfs_file_ioctl(filp, cmd, p);
+	case XFS_IOC_SWAPEXT_32: {
+		struct xfs_swapext	  sxp;
+		struct compat_xfs_swapext __user *sxu = arg;
+
+		/* Bulk copy in up to the sx_stat field, then copy bstat */
+		if (copy_from_user(&sxp, sxu,
+				   offsetof(struct xfs_swapext, sx_stat)) ||
+		    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
+			return -XFS_ERROR(EFAULT);
+		error = xfs_swapext(&sxp);
+		return -error;
+	}
+	case XFS_IOC_FSBULKSTAT_32:
+	case XFS_IOC_FSBULKSTAT_SINGLE_32:
+	case XFS_IOC_FSINUMBERS_32:
+		return xfs_compat_ioc_bulkstat(mp, cmd, arg);
+	case XFS_IOC_FD_TO_HANDLE_32:
+	case XFS_IOC_PATH_TO_HANDLE_32:
+	case XFS_IOC_PATH_TO_FSHANDLE_32: {
+		struct xfs_fsop_handlereq	hreq;
+
+		if (xfs_compat_handlereq_copyin(&hreq, arg))
+			return -XFS_ERROR(EFAULT);
+		cmd = _NATIVE_IOC(cmd, struct
xfs_fsop_handlereq);
+		return xfs_find_handle(cmd, &hreq);
+	}
+	case XFS_IOC_OPEN_BY_HANDLE_32: {
+		struct xfs_fsop_handlereq	hreq;
+
+		if (xfs_compat_handlereq_copyin(&hreq, arg))
+			return -XFS_ERROR(EFAULT);
+		return xfs_open_by_handle(filp, &hreq);
+	}
+	case XFS_IOC_READLINK_BY_HANDLE_32: {
+		struct xfs_fsop_handlereq	hreq;
+
+		if (xfs_compat_handlereq_copyin(&hreq, arg))
+			return -XFS_ERROR(EFAULT);
+		return xfs_readlink_by_handle(filp, &hreq);
+	}
+	case XFS_IOC_ATTRLIST_BY_HANDLE_32:
+		return xfs_compat_attrlist_by_handle(filp, arg);
+	case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
+		return xfs_compat_attrmulti_by_handle(filp, arg);
+	case XFS_IOC_FSSETDM_BY_HANDLE_32:
+		return xfs_compat_fssetdm_by_handle(filp, arg);
+	default:
+		return -XFS_ERROR(ENOIOCTLCMD);
+	}
+}
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
new file mode 100644
index 0000000..80f4060
--- /dev/null
+++ b/fs/xfs/xfs_ioctl32.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_IOCTL32_H__
+#define __XFS_IOCTL32_H__
+
+#include <linux/compat.h>
+
+/*
+ * on 32-bit arches, ioctl argument structures may have different sizes
+ * and/or alignment.  We define compat structures which match the
+ * 32-bit sizes/alignments here, and their associated ioctl numbers.
+ *
+ * xfs_ioctl32.c contains routines to copy these structures in and out.
+ */
+
+/* stock kernel-level ioctls we support */
+#define XFS_IOC_GETXFLAGS_32	FS_IOC32_GETFLAGS
+#define XFS_IOC_SETXFLAGS_32	FS_IOC32_SETFLAGS
+#define XFS_IOC_GETVERSION_32	FS_IOC32_GETVERSION
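
One property of these command words is worth spelling out before the compat structures that follow: the _IOR()/_IOW() macros fold sizeof(type) into the ioctl number, so a 32-bit layout with different padding yields a different number, and that is exactly why xfs_file_compat_ioctl() above rewrites the size field with _NATIVE_IOC() before handing off to the native handler. A hedged stand-alone illustration, struct names invented, shape borrowed from the growfs case:

#include <stdio.h>
#include <linux/ioctl.h>

struct grow_native {		/* 8-byte member then 4-byte member: on */
	long long newblocks;	/* x86_64 tail padding rounds this to 16 */
	int       imaxpct;
};

struct grow_compat {		/* packed: no tail padding, sizeof is 12, */
	long long newblocks;	/* matching what i386 userspace passes in */
	int       imaxpct;
} __attribute__((packed));

int main(void)
{
	printf("native sizeof %zu -> cmd %#lx\n", sizeof(struct grow_native),
	       (unsigned long)_IOW('X', 110, struct grow_native));
	printf("compat sizeof %zu -> cmd %#lx\n", sizeof(struct grow_compat),
	       (unsigned long)_IOW('X', 110, struct grow_compat));
	return 0;
}

Same magic character, same command number, two distinct command words; the compat layer must therefore register the _32 variants explicitly.

+
+/*
+ * On intel, even if sizes match, alignment and/or padding may differ.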
+ */ +#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) +#define BROKEN_X86_ALIGNMENT +#define __compat_packed __attribute__((packed)) +#else +#define __compat_packed +#endif + +typedef struct compat_xfs_bstime { + compat_time_t tv_sec; /* seconds */ + __s32 tv_nsec; /* and nanoseconds */ +} compat_xfs_bstime_t; + +typedef struct compat_xfs_bstat { + __u64 bs_ino; /* inode number */ + __u16 bs_mode; /* type and mode */ + __u16 bs_nlink; /* number of links */ + __u32 bs_uid; /* user id */ + __u32 bs_gid; /* group id */ + __u32 bs_rdev; /* device value */ + __s32 bs_blksize; /* block size */ + __s64 bs_size; /* file size */ + compat_xfs_bstime_t bs_atime; /* access time */ + compat_xfs_bstime_t bs_mtime; /* modify time */ + compat_xfs_bstime_t bs_ctime; /* inode change time */ + int64_t bs_blocks; /* number of blocks */ + __u32 bs_xflags; /* extended flags */ + __s32 bs_extsize; /* extent size */ + __s32 bs_extents; /* number of extents */ + __u32 bs_gen; /* generation count */ + __u16 bs_projid_lo; /* lower part of project id */ +#define bs_projid bs_projid_lo /* (previously just bs_projid) */ + __u16 bs_projid_hi; /* high part of project id */ + unsigned char bs_pad[12]; /* pad space, unused */ + __u32 bs_dmevmask; /* DMIG event mask */ + __u16 bs_dmstate; /* DMIG state info */ + __u16 bs_aextents; /* attribute number of extents */ +} __compat_packed compat_xfs_bstat_t; + +typedef struct compat_xfs_fsop_bulkreq { + compat_uptr_t lastip; /* last inode # pointer */ + __s32 icount; /* count of entries in buffer */ + compat_uptr_t ubuffer; /* user buffer for inode desc. */ + compat_uptr_t ocount; /* output count pointer */ +} compat_xfs_fsop_bulkreq_t; + +#define XFS_IOC_FSBULKSTAT_32 \ + _IOWR('X', 101, struct compat_xfs_fsop_bulkreq) +#define XFS_IOC_FSBULKSTAT_SINGLE_32 \ + _IOWR('X', 102, struct compat_xfs_fsop_bulkreq) +#define XFS_IOC_FSINUMBERS_32 \ + _IOWR('X', 103, struct compat_xfs_fsop_bulkreq) + +typedef struct compat_xfs_fsop_handlereq { + __u32 fd; /* fd for FD_TO_HANDLE */ + compat_uptr_t path; /* user pathname */ + __u32 oflags; /* open flags */ + compat_uptr_t ihandle; /* user supplied handle */ + __u32 ihandlen; /* user supplied length */ + compat_uptr_t ohandle; /* user buffer for handle */ + compat_uptr_t ohandlen; /* user buffer length */ +} compat_xfs_fsop_handlereq_t; + +#define XFS_IOC_PATH_TO_FSHANDLE_32 \ + _IOWR('X', 104, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_PATH_TO_HANDLE_32 \ + _IOWR('X', 105, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_FD_TO_HANDLE_32 \ + _IOWR('X', 106, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_OPEN_BY_HANDLE_32 \ + _IOWR('X', 107, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_READLINK_BY_HANDLE_32 \ + _IOWR('X', 108, struct compat_xfs_fsop_handlereq) + +/* The bstat field in the swapext struct needs translation */ +typedef struct compat_xfs_swapext { + __int64_t sx_version; /* version */ + __int64_t sx_fdtarget; /* fd of target file */ + __int64_t sx_fdtmp; /* fd of tmp file */ + xfs_off_t sx_offset; /* offset into file */ + xfs_off_t sx_length; /* leng from offset */ + char sx_pad[16]; /* pad space, unused */ + compat_xfs_bstat_t sx_stat; /* stat of target b4 copy */ +} __compat_packed compat_xfs_swapext_t; + +#define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext) + +typedef struct compat_xfs_fsop_attrlist_handlereq { + struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */ + struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */ + __u32 flags; /* which namespace to 
use */ + __u32 buflen; /* length of buffer supplied */ + compat_uptr_t buffer; /* returned names */ +} __compat_packed compat_xfs_fsop_attrlist_handlereq_t; + +/* Note: actually this is read/write */ +#define XFS_IOC_ATTRLIST_BY_HANDLE_32 \ + _IOW('X', 122, struct compat_xfs_fsop_attrlist_handlereq) + +/* am_opcodes defined in xfs_fs.h */ +typedef struct compat_xfs_attr_multiop { + __u32 am_opcode; + __s32 am_error; + compat_uptr_t am_attrname; + compat_uptr_t am_attrvalue; + __u32 am_length; + __u32 am_flags; +} compat_xfs_attr_multiop_t; + +typedef struct compat_xfs_fsop_attrmulti_handlereq { + struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */ + __u32 opcount;/* count of following multiop */ + /* ptr to compat_xfs_attr_multiop */ + compat_uptr_t ops; /* attr_multi data */ +} compat_xfs_fsop_attrmulti_handlereq_t; + +#define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \ + _IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq) + +typedef struct compat_xfs_fsop_setdm_handlereq { + struct compat_xfs_fsop_handlereq hreq; /* handle information */ + /* ptr to struct fsdmidata */ + compat_uptr_t data; /* DMAPI data */ +} compat_xfs_fsop_setdm_handlereq_t; + +#define XFS_IOC_FSSETDM_BY_HANDLE_32 \ + _IOW('X', 121, struct compat_xfs_fsop_setdm_handlereq) + +#ifdef BROKEN_X86_ALIGNMENT +/* on ia32 l_start is on a 32-bit boundary */ +typedef struct compat_xfs_flock64 { + __s16 l_type; + __s16 l_whence; + __s64 l_start __attribute__((packed)); + /* len == 0 means until end of file */ + __s64 l_len __attribute__((packed)); + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserve area */ +} compat_xfs_flock64_t; + +#define XFS_IOC_ALLOCSP_32 _IOW('X', 10, struct compat_xfs_flock64) +#define XFS_IOC_FREESP_32 _IOW('X', 11, struct compat_xfs_flock64) +#define XFS_IOC_ALLOCSP64_32 _IOW('X', 36, struct compat_xfs_flock64) +#define XFS_IOC_FREESP64_32 _IOW('X', 37, struct compat_xfs_flock64) +#define XFS_IOC_RESVSP_32 _IOW('X', 40, struct compat_xfs_flock64) +#define XFS_IOC_UNRESVSP_32 _IOW('X', 41, struct compat_xfs_flock64) +#define XFS_IOC_RESVSP64_32 _IOW('X', 42, struct compat_xfs_flock64) +#define XFS_IOC_UNRESVSP64_32 _IOW('X', 43, struct compat_xfs_flock64) +#define XFS_IOC_ZERO_RANGE_32 _IOW('X', 57, struct compat_xfs_flock64) + +typedef struct compat_xfs_fsop_geom_v1 { + __u32 blocksize; /* filesystem (data) block size */ + __u32 rtextsize; /* realtime extent size */ + __u32 agblocks; /* fsblocks in an AG */ + __u32 agcount; /* number of allocation groups */ + __u32 logblocks; /* fsblocks in the log */ + __u32 sectsize; /* (data) sector size, bytes */ + __u32 inodesize; /* inode size in bytes */ + __u32 imaxpct; /* max allowed inode space(%) */ + __u64 datablocks; /* fsblocks in data subvolume */ + __u64 rtblocks; /* fsblocks in realtime subvol */ + __u64 rtextents; /* rt extents in realtime subvol*/ + __u64 logstart; /* starting fsblock of the log */ + unsigned char uuid[16]; /* unique id of the filesystem */ + __u32 sunit; /* stripe unit, fsblocks */ + __u32 swidth; /* stripe width, fsblocks */ + __s32 version; /* structure version */ + __u32 flags; /* superblock version flags */ + __u32 logsectsize; /* log sector size, bytes */ + __u32 rtsectsize; /* realtime sector size, bytes */ + __u32 dirblocksize; /* directory block size, bytes */ +} __attribute__((packed)) compat_xfs_fsop_geom_v1_t; + +#define XFS_IOC_FSGEOMETRY_V1_32 \ + _IOR('X', 100, struct compat_xfs_fsop_geom_v1) + +typedef struct compat_xfs_inogrp { + __u64 xi_startino; /* starting inode number */ + __s32 
xi_alloccount;	/* # bits set in allocmask */
+	__u64		xi_allocmask;	/* mask of allocated inodes */
+} __attribute__((packed)) compat_xfs_inogrp_t;
+
+/* These growfs input structures have padding on the end, so must translate */
+typedef struct compat_xfs_growfs_data {
+	__u64		newblocks;	/* new data subvol size, fsblocks */
+	__u32		imaxpct;	/* new inode space percentage limit */
+} __attribute__((packed)) compat_xfs_growfs_data_t;
+
+typedef struct compat_xfs_growfs_rt {
+	__u64		newblocks;	/* new realtime size, fsblocks */
+	__u32		extsize;	/* new realtime extent size, fsblocks */
+} __attribute__((packed)) compat_xfs_growfs_rt_t;
+
+#define XFS_IOC_FSGROWFSDATA_32 _IOW('X', 110, struct compat_xfs_growfs_data)
+#define XFS_IOC_FSGROWFSRT_32	_IOW('X', 112, struct compat_xfs_growfs_rt)
+
+#endif /* BROKEN_X86_ALIGNMENT */
+
+#endif /* __XFS_IOCTL32_H__ */
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
new file mode 100644
index 0000000..b9c172b
--- /dev/null
+++ b/fs/xfs/xfs_iops.c
@@ -0,0 +1,1210 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_acl.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_vnodeops.h"
+#include "xfs_inode_item.h"
+#include "xfs_trace.h"
+
+#include <linux/capability.h>
+#include <linux/xattr.h>
+#include <linux/namei.h>
+#include <linux/posix_acl.h>
+#include <linux/security.h>
+#include <linux/fiemap.h>
+#include <linux/slab.h>
+
+/*
+ * Bring the timestamps in the XFS inode uptodate.
+ *
+ * Used before writing the inode to disk.
+ */
+void
+xfs_synchronize_times(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = VFS_I(ip);
+
+	ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+	ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
+	ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
+	ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
+	ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
+	ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;
+}
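
One detail of xfs_synchronize_times() worth calling out: the on-disk timestamp fields are 32 bits wide, so the (__int32_t) casts silently truncate tv_sec values past January 2038 on 64-bit time_t systems. A small userspace illustration of the same mirroring pattern, with invented names rather than the XFS types:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct disk_timestamp {
	int32_t	t_sec;		/* 32-bit on-disk seconds field */
	int32_t	t_nsec;
};

static void sync_timestamp(struct disk_timestamp *dst,
			   const struct timespec *src)
{
	dst->t_sec  = (int32_t)src->tv_sec;	/* truncates past 2038 */
	dst->t_nsec = (int32_t)src->tv_nsec;
}

int main(void)
{
	/* 2^31 seconds: one past the signed 32-bit horizon (needs 64-bit time_t) */
	struct timespec ts = { .tv_sec = (time_t)1 << 31, .tv_nsec = 0 };
	struct disk_timestamp dt;

	sync_timestamp(&dt, &ts);
	printf("in: %lld  out: %d\n", (long long)ts.tv_sec, dt.t_sec);
	return 0;
}

The wrap to a negative t_sec is the on-disk format limitation, not a bug in the mirroring code.

+
+/*
+ * If the linux inode is valid, mark it dirty.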
+ * Used when committing a dirty inode into a transaction so that + * the inode will get written back by the linux code + */ +void +xfs_mark_inode_dirty_sync( + xfs_inode_t *ip) +{ + struct inode *inode = VFS_I(ip); + + if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) + mark_inode_dirty_sync(inode); +} + +void +xfs_mark_inode_dirty( + xfs_inode_t *ip) +{ + struct inode *inode = VFS_I(ip); + + if (!(inode->i_state & (I_WILL_FREE|I_FREEING))) + mark_inode_dirty(inode); +} + +/* + * Hook in SELinux. This is not quite correct yet, what we really need + * here (as we do for default ACLs) is a mechanism by which creation of + * these attrs can be journalled at inode creation time (along with the + * inode, of course, such that log replay can't cause these to be lost). + */ +STATIC int +xfs_init_security( + struct inode *inode, + struct inode *dir, + const struct qstr *qstr) +{ + struct xfs_inode *ip = XFS_I(inode); + size_t length; + void *value; + unsigned char *name; + int error; + + error = security_inode_init_security(inode, dir, qstr, (char **)&name, + &value, &length); + if (error) { + if (error == -EOPNOTSUPP) + return 0; + return -error; + } + + error = xfs_attr_set(ip, name, value, length, ATTR_SECURE); + + kfree(name); + kfree(value); + return error; +} + +static void +xfs_dentry_to_name( + struct xfs_name *namep, + struct dentry *dentry) +{ + namep->name = dentry->d_name.name; + namep->len = dentry->d_name.len; +} + +STATIC void +xfs_cleanup_inode( + struct inode *dir, + struct inode *inode, + struct dentry *dentry) +{ + struct xfs_name teardown; + + /* Oh, the horror. + * If we can't add the ACL or we fail in + * xfs_init_security we must back out. + * ENOSPC can hit here, among other things. + */ + xfs_dentry_to_name(&teardown, dentry); + + xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); + iput(inode); +} + +STATIC int +xfs_vn_mknod( + struct inode *dir, + struct dentry *dentry, + int mode, + dev_t rdev) +{ + struct inode *inode; + struct xfs_inode *ip = NULL; + struct posix_acl *default_acl = NULL; + struct xfs_name name; + int error; + + /* + * Irix uses Missed'em'V split, but doesn't want to see + * the upper 5 bits of (14bit) major. 
+ */ + if (S_ISCHR(mode) || S_ISBLK(mode)) { + if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) + return -EINVAL; + rdev = sysv_encode_dev(rdev); + } else { + rdev = 0; + } + + if (IS_POSIXACL(dir)) { + default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); + if (IS_ERR(default_acl)) + return PTR_ERR(default_acl); + + if (!default_acl) + mode &= ~current_umask(); + } + + xfs_dentry_to_name(&name, dentry); + error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); + if (unlikely(error)) + goto out_free_acl; + + inode = VFS_I(ip); + + error = xfs_init_security(inode, dir, &dentry->d_name); + if (unlikely(error)) + goto out_cleanup_inode; + + if (default_acl) { + error = -xfs_inherit_acl(inode, default_acl); + default_acl = NULL; + if (unlikely(error)) + goto out_cleanup_inode; + } + + + d_instantiate(dentry, inode); + return -error; + + out_cleanup_inode: + xfs_cleanup_inode(dir, inode, dentry); + out_free_acl: + posix_acl_release(default_acl); + return -error; +} + +STATIC int +xfs_vn_create( + struct inode *dir, + struct dentry *dentry, + int mode, + struct nameidata *nd) +{ + return xfs_vn_mknod(dir, dentry, mode, 0); +} + +STATIC int +xfs_vn_mkdir( + struct inode *dir, + struct dentry *dentry, + int mode) +{ + return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); +} + +STATIC struct dentry * +xfs_vn_lookup( + struct inode *dir, + struct dentry *dentry, + struct nameidata *nd) +{ + struct xfs_inode *cip; + struct xfs_name name; + int error; + + if (dentry->d_name.len >= MAXNAMELEN) + return ERR_PTR(-ENAMETOOLONG); + + xfs_dentry_to_name(&name, dentry); + error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); + if (unlikely(error)) { + if (unlikely(error != ENOENT)) + return ERR_PTR(-error); + d_add(dentry, NULL); + return NULL; + } + + return d_splice_alias(VFS_I(cip), dentry); +} + +STATIC struct dentry * +xfs_vn_ci_lookup( + struct inode *dir, + struct dentry *dentry, + struct nameidata *nd) +{ + struct xfs_inode *ip; + struct xfs_name xname; + struct xfs_name ci_name; + struct qstr dname; + int error; + + if (dentry->d_name.len >= MAXNAMELEN) + return ERR_PTR(-ENAMETOOLONG); + + xfs_dentry_to_name(&xname, dentry); + error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); + if (unlikely(error)) { + if (unlikely(error != ENOENT)) + return ERR_PTR(-error); + /* + * call d_add(dentry, NULL) here when d_drop_negative_children + * is called in xfs_vn_mknod (ie. allow negative dentries + * with CI filesystems). + */ + return NULL; + } + + /* if exact match, just splice and exit */ + if (!ci_name.name) + return d_splice_alias(VFS_I(ip), dentry); + + /* else case-insensitive match... */ + dname.name = ci_name.name; + dname.len = ci_name.len; + dentry = d_add_ci(dentry, VFS_I(ip), &dname); + kmem_free(ci_name.name); + return dentry; +} + +STATIC int +xfs_vn_link( + struct dentry *old_dentry, + struct inode *dir, + struct dentry *dentry) +{ + struct inode *inode = old_dentry->d_inode; + struct xfs_name name; + int error; + + xfs_dentry_to_name(&name, dentry); + + error = xfs_link(XFS_I(dir), XFS_I(inode), &name); + if (unlikely(error)) + return -error; + + ihold(inode); + d_instantiate(dentry, inode); + return 0; +} + +STATIC int +xfs_vn_unlink( + struct inode *dir, + struct dentry *dentry) +{ + struct xfs_name name; + int error; + + xfs_dentry_to_name(&name, dentry); + + error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); + if (error) + return error; + + /* + * With unlink, the VFS makes the dentry "negative": no inode, + * but still hashed. 
This is incompatible with case-insensitive + * mode, so invalidate (unhash) the dentry in CI-mode. + */ + if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb)) + d_invalidate(dentry); + return 0; +} + +STATIC int +xfs_vn_symlink( + struct inode *dir, + struct dentry *dentry, + const char *symname) +{ + struct inode *inode; + struct xfs_inode *cip = NULL; + struct xfs_name name; + int error; + mode_t mode; + + mode = S_IFLNK | + (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO); + xfs_dentry_to_name(&name, dentry); + + error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); + if (unlikely(error)) + goto out; + + inode = VFS_I(cip); + + error = xfs_init_security(inode, dir, &dentry->d_name); + if (unlikely(error)) + goto out_cleanup_inode; + + d_instantiate(dentry, inode); + return 0; + + out_cleanup_inode: + xfs_cleanup_inode(dir, inode, dentry); + out: + return -error; +} + +STATIC int +xfs_vn_rename( + struct inode *odir, + struct dentry *odentry, + struct inode *ndir, + struct dentry *ndentry) +{ + struct inode *new_inode = ndentry->d_inode; + struct xfs_name oname; + struct xfs_name nname; + + xfs_dentry_to_name(&oname, odentry); + xfs_dentry_to_name(&nname, ndentry); + + return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), + XFS_I(ndir), &nname, new_inode ? + XFS_I(new_inode) : NULL); +} + +/* + * careful here - this function can get called recursively, so + * we need to be very careful about how much stack we use. + * uio is kmalloced for this reason... + */ +STATIC void * +xfs_vn_follow_link( + struct dentry *dentry, + struct nameidata *nd) +{ + char *link; + int error = -ENOMEM; + + link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); + if (!link) + goto out_err; + + error = -xfs_readlink(XFS_I(dentry->d_inode), link); + if (unlikely(error)) + goto out_kfree; + + nd_set_link(nd, link); + return NULL; + + out_kfree: + kfree(link); + out_err: + nd_set_link(nd, ERR_PTR(error)); + return NULL; +} + +STATIC void +xfs_vn_put_link( + struct dentry *dentry, + struct nameidata *nd, + void *p) +{ + char *s = nd_get_link(nd); + + if (!IS_ERR(s)) + kfree(s); +} + +STATIC int +xfs_vn_getattr( + struct vfsmount *mnt, + struct dentry *dentry, + struct kstat *stat) +{ + struct inode *inode = dentry->d_inode; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + + trace_xfs_getattr(ip); + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + stat->size = XFS_ISIZE(ip); + stat->dev = inode->i_sb->s_dev; + stat->mode = ip->i_d.di_mode; + stat->nlink = ip->i_d.di_nlink; + stat->uid = ip->i_d.di_uid; + stat->gid = ip->i_d.di_gid; + stat->ino = ip->i_ino; + stat->atime = inode->i_atime; + stat->mtime = inode->i_mtime; + stat->ctime = inode->i_ctime; + stat->blocks = + XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); + + + switch (inode->i_mode & S_IFMT) { + case S_IFBLK: + case S_IFCHR: + stat->blksize = BLKDEV_IOSIZE; + stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, + sysv_minor(ip->i_df.if_u2.if_rdev)); + break; + default: + if (XFS_IS_REALTIME_INODE(ip)) { + /* + * If the file blocks are being allocated from a + * realtime volume, then return the inode's realtime + * extent size or the realtime volume's extent size. 
+ */ + stat->blksize = + xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; + } else + stat->blksize = xfs_preferred_iosize(mp); + stat->rdev = 0; + break; + } + + return 0; +} + +int +xfs_setattr_nonsize( + struct xfs_inode *ip, + struct iattr *iattr, + int flags) +{ + xfs_mount_t *mp = ip->i_mount; + struct inode *inode = VFS_I(ip); + int mask = iattr->ia_valid; + xfs_trans_t *tp; + int error; + uid_t uid = 0, iuid = 0; + gid_t gid = 0, igid = 0; + struct xfs_dquot *udqp = NULL, *gdqp = NULL; + struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL; + + trace_xfs_setattr(ip); + + if (mp->m_flags & XFS_MOUNT_RDONLY) + return XFS_ERROR(EROFS); + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + error = -inode_change_ok(inode, iattr); + if (error) + return XFS_ERROR(error); + + ASSERT((mask & ATTR_SIZE) == 0); + + /* + * If disk quotas is on, we make sure that the dquots do exist on disk, + * before we start any other transactions. Trying to do this later + * is messy. We don't care to take a readlock to look at the ids + * in inode here, because we can't hold it across the trans_reserve. + * If the IDs do change before we take the ilock, we're covered + * because the i_*dquot fields will get updated anyway. + */ + if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { + uint qflags = 0; + + if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { + uid = iattr->ia_uid; + qflags |= XFS_QMOPT_UQUOTA; + } else { + uid = ip->i_d.di_uid; + } + if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { + gid = iattr->ia_gid; + qflags |= XFS_QMOPT_GQUOTA; + } else { + gid = ip->i_d.di_gid; + } + + /* + * We take a reference when we initialize udqp and gdqp, + * so it is important that we never blindly double trip on + * the same variable. See xfs_create() for an example. + */ + ASSERT(udqp == NULL); + ASSERT(gdqp == NULL); + error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), + qflags, &udqp, &gdqp); + if (error) + return error; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); + if (error) + goto out_dqrele; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* + * Change file ownership. Must be the owner or privileged. + */ + if (mask & (ATTR_UID|ATTR_GID)) { + /* + * These IDs could have changed since we last looked at them. + * But, we're assured that if the ownership did change + * while we didn't have the inode locked, inode's dquot(s) + * would have changed also. + */ + iuid = ip->i_d.di_uid; + igid = ip->i_d.di_gid; + gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; + uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; + + /* + * Do a quota reservation only if uid/gid is actually + * going to change. + */ + if (XFS_IS_QUOTA_RUNNING(mp) && + ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || + (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { + ASSERT(tp); + error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, + capable(CAP_FOWNER) ? + XFS_QMOPT_FORCE_RES : 0); + if (error) /* out of quota */ + goto out_trans_cancel; + } + } + + xfs_trans_ijoin(tp, ip); + + /* + * Change file ownership. Must be the owner or privileged. + */ + if (mask & (ATTR_UID|ATTR_GID)) { + /* + * CAP_FSETID overrides the following restrictions: + * + * The set-user-ID and set-group-ID bits of a file will be + * cleared upon successful return from chown() + */ + if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && + !capable(CAP_FSETID)) + ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); + + /* + * Change the ownerships and register quota modifications + * in the transaction. 
+ */ + if (iuid != uid) { + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { + ASSERT(mask & ATTR_UID); + ASSERT(udqp); + olddquot1 = xfs_qm_vop_chown(tp, ip, + &ip->i_udquot, udqp); + } + ip->i_d.di_uid = uid; + inode->i_uid = uid; + } + if (igid != gid) { + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { + ASSERT(!XFS_IS_PQUOTA_ON(mp)); + ASSERT(mask & ATTR_GID); + ASSERT(gdqp); + olddquot2 = xfs_qm_vop_chown(tp, ip, + &ip->i_gdquot, gdqp); + } + ip->i_d.di_gid = gid; + inode->i_gid = gid; + } + } + + /* + * Change file access modes. + */ + if (mask & ATTR_MODE) { + umode_t mode = iattr->ia_mode; + + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + mode &= ~S_ISGID; + + ip->i_d.di_mode &= S_IFMT; + ip->i_d.di_mode |= mode & ~S_IFMT; + + inode->i_mode &= S_IFMT; + inode->i_mode |= mode & ~S_IFMT; + } + + /* + * Change file access or modified times. + */ + if (mask & ATTR_ATIME) { + inode->i_atime = iattr->ia_atime; + ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; + ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; + ip->i_update_core = 1; + } + if (mask & ATTR_CTIME) { + inode->i_ctime = iattr->ia_ctime; + ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; + ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; + ip->i_update_core = 1; + } + if (mask & ATTR_MTIME) { + inode->i_mtime = iattr->ia_mtime; + ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; + ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; + ip->i_update_core = 1; + } + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + XFS_STATS_INC(xs_ig_attrchg); + + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + /* + * Release any dquot(s) the inode had kept before chown. + */ + xfs_qm_dqrele(olddquot1); + xfs_qm_dqrele(olddquot2); + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); + + if (error) + return XFS_ERROR(error); + + /* + * XXX(hch): Updating the ACL entries is not atomic vs the i_mode + * update. We could avoid this with linked transactions + * and passing down the transaction pointer all the way + * to attr_set. No previous user of the generic + * Posix ACL code seems to care about this issue either. + */ + if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { + error = -xfs_acl_chmod(inode); + if (error) + return XFS_ERROR(error); + } + + return 0; + +out_trans_cancel: + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_ILOCK_EXCL); +out_dqrele: + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); + return error; +} + +/* + * Truncate file. Must have write permission and not be a directory. + */ +int +xfs_setattr_size( + struct xfs_inode *ip, + struct iattr *iattr, + int flags) +{ + struct xfs_mount *mp = ip->i_mount; + struct inode *inode = VFS_I(ip); + int mask = iattr->ia_valid; + struct xfs_trans *tp; + int error; + uint lock_flags; + uint commit_flags = 0; + + trace_xfs_setattr(ip); + + if (mp->m_flags & XFS_MOUNT_RDONLY) + return XFS_ERROR(EROFS); + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + error = -inode_change_ok(inode, iattr); + if (error) + return XFS_ERROR(error); + + ASSERT(S_ISREG(ip->i_d.di_mode)); + ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| + ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| + ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); + + lock_flags = XFS_ILOCK_EXCL; + if (!(flags & XFS_ATTR_NOLOCK)) + lock_flags |= XFS_IOLOCK_EXCL; + xfs_ilock(ip, lock_flags); + + /* + * Short circuit the truncate case for zero length files. 
+ */ + if (iattr->ia_size == 0 && + ip->i_size == 0 && ip->i_d.di_nextents == 0) { + if (!(mask & (ATTR_CTIME|ATTR_MTIME))) + goto out_unlock; + + /* + * Use the regular setattr path to update the timestamps. + */ + xfs_iunlock(ip, lock_flags); + iattr->ia_valid &= ~ATTR_SIZE; + return xfs_setattr_nonsize(ip, iattr, 0); + } + + /* + * Make sure that the dquots are attached to the inode. + */ + error = xfs_qm_dqattach_locked(ip, 0); + if (error) + goto out_unlock; + + /* + * Now we can make the changes. Before we join the inode to the + * transaction, take care of the part of the truncation that must be + * done without the inode lock. This needs to be done before joining + * the inode to the transaction, because the inode cannot be unlocked + * once it is a part of the transaction. + */ + if (iattr->ia_size > ip->i_size) { + /* + * Do the first part of growing a file: zero any data in the + * last block that is beyond the old EOF. We need to do this + * before the inode is joined to the transaction to modify + * i_size. + */ + error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size); + if (error) + goto out_unlock; + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + lock_flags &= ~XFS_ILOCK_EXCL; + + /* + * We are going to log the inode size change in this transaction so + * any previous writes that are beyond the on disk EOF and the new + * EOF that have not been written out need to be written here. If we + * do not write the data out, we expose ourselves to the null files + * problem. + * + * Only flush from the on disk size to the smaller of the in memory + * file size or the new size as that's the range we really care about + * here and prevents waiting for other data not within the range we + * care about here. + */ + if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) { + error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, + XBF_ASYNC, FI_NONE); + if (error) + goto out_unlock; + } + + /* + * Wait for all I/O to complete. + */ + xfs_ioend_wait(ip); + + error = -block_truncate_page(inode->i_mapping, iattr->ia_size, + xfs_get_blocks); + if (error) + goto out_unlock; + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); + error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) + goto out_trans_cancel; + + truncate_setsize(inode, iattr->ia_size); + + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + lock_flags |= XFS_ILOCK_EXCL; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip); + + /* + * Only change the c/mtime if we are changing the size or we are + * explicitly asked to change it. This handles the semantic difference + * between truncate() and ftruncate() as implemented in the VFS. + * + * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a + * special case where we need to update the times despite not having + * these flags set. For all other operations the VFS set these flags + * explicitly if it wants a timestamp update. 
+ */ + if (iattr->ia_size != ip->i_size && + (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { + iattr->ia_ctime = iattr->ia_mtime = + current_fs_time(inode->i_sb); + mask |= ATTR_CTIME | ATTR_MTIME; + } + + if (iattr->ia_size > ip->i_size) { + ip->i_d.di_size = iattr->ia_size; + ip->i_size = iattr->ia_size; + } else if (iattr->ia_size <= ip->i_size || + (iattr->ia_size == 0 && ip->i_d.di_nextents)) { + error = xfs_itruncate_data(&tp, ip, iattr->ia_size); + if (error) + goto out_trans_abort; + + /* + * Truncated "down", so we're removing references to old data + * here - if we delay flushing for a long time, we expose + * ourselves unduly to the notorious NULL files problem. So, + * we mark this inode and flush it when the file is closed, + * and do not wait the usual (long) time for writeout. + */ + xfs_iflags_set(ip, XFS_ITRUNCATED); + } + + if (mask & ATTR_CTIME) { + inode->i_ctime = iattr->ia_ctime; + ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; + ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; + ip->i_update_core = 1; + } + if (mask & ATTR_MTIME) { + inode->i_mtime = iattr->ia_mtime; + ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; + ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; + ip->i_update_core = 1; + } + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + XFS_STATS_INC(xs_ig_attrchg); + + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); +out_unlock: + if (lock_flags) + xfs_iunlock(ip, lock_flags); + return error; + +out_trans_abort: + commit_flags |= XFS_TRANS_ABORT; +out_trans_cancel: + xfs_trans_cancel(tp, commit_flags); + goto out_unlock; +} + +STATIC int +xfs_vn_setattr( + struct dentry *dentry, + struct iattr *iattr) +{ + if (iattr->ia_valid & ATTR_SIZE) + return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0); + return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); +} + +#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) + +/* + * Call fiemap helper to fill in user data. + * Returns positive errors to xfs_getbmap. + */ +STATIC int +xfs_fiemap_format( + void **arg, + struct getbmapx *bmv, + int *full) +{ + int error; + struct fiemap_extent_info *fieinfo = *arg; + u32 fiemap_flags = 0; + u64 logical, physical, length; + + /* Do nothing for a hole */ + if (bmv->bmv_block == -1LL) + return 0; + + logical = BBTOB(bmv->bmv_offset); + physical = BBTOB(bmv->bmv_block); + length = BBTOB(bmv->bmv_length); + + if (bmv->bmv_oflags & BMV_OF_PREALLOC) + fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; + else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { + fiemap_flags |= FIEMAP_EXTENT_DELALLOC; + physical = 0; /* no block yet */ + } + if (bmv->bmv_oflags & BMV_OF_LAST) + fiemap_flags |= FIEMAP_EXTENT_LAST; + + error = fiemap_fill_next_extent(fieinfo, logical, physical, + length, fiemap_flags); + if (error > 0) { + error = 0; + *full = 1; /* user array now full */ + } + + return -error; +} + +STATIC int +xfs_vn_fiemap( + struct inode *inode, + struct fiemap_extent_info *fieinfo, + u64 start, + u64 length) +{ + xfs_inode_t *ip = XFS_I(inode); + struct getbmapx bm; + int error; + + error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); + if (error) + return error; + + /* Set up bmap header for xfs internal routine */ + bm.bmv_offset = BTOBB(start); + /* Special case for whole file */ + if (length == FIEMAP_MAX_OFFSET) + bm.bmv_length = -1LL; + else + bm.bmv_length = BTOBB(length); + + /* We add one because in getbmap world count includes the header */ + bm.bmv_count = !fieinfo->fi_extents_max ? 
MAXEXTNUM :
+			fieinfo->fi_extents_max + 1;
+	bm.bmv_count = min_t(__s32, bm.bmv_count,
+			     (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
+	bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
+	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
+		bm.bmv_iflags |= BMV_IF_ATTRFORK;
+	if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
+		bm.bmv_iflags |= BMV_IF_DELALLOC;
+
+	error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
+	if (error)
+		return -error;
+
+	return 0;
+}
+
+static const struct inode_operations xfs_inode_operations = {
+	.get_acl		= xfs_get_acl,
+	.getattr		= xfs_vn_getattr,
+	.setattr		= xfs_vn_setattr,
+	.setxattr		= generic_setxattr,
+	.getxattr		= generic_getxattr,
+	.removexattr		= generic_removexattr,
+	.listxattr		= xfs_vn_listxattr,
+	.fiemap			= xfs_vn_fiemap,
+};
+
+static const struct inode_operations xfs_dir_inode_operations = {
+	.create			= xfs_vn_create,
+	.lookup			= xfs_vn_lookup,
+	.link			= xfs_vn_link,
+	.unlink			= xfs_vn_unlink,
+	.symlink		= xfs_vn_symlink,
+	.mkdir			= xfs_vn_mkdir,
+	/*
+	 * Yes, XFS uses the same method for rmdir and unlink.
+	 *
+	 * There are some subtle differences deeper in the code,
+	 * but we use S_ISDIR to check for those.
+	 */
+	.rmdir			= xfs_vn_unlink,
+	.mknod			= xfs_vn_mknod,
+	.rename			= xfs_vn_rename,
+	.get_acl		= xfs_get_acl,
+	.getattr		= xfs_vn_getattr,
+	.setattr		= xfs_vn_setattr,
+	.setxattr		= generic_setxattr,
+	.getxattr		= generic_getxattr,
+	.removexattr		= generic_removexattr,
+	.listxattr		= xfs_vn_listxattr,
+};
+
+static const struct inode_operations xfs_dir_ci_inode_operations = {
+	.create			= xfs_vn_create,
+	.lookup			= xfs_vn_ci_lookup,
+	.link			= xfs_vn_link,
+	.unlink			= xfs_vn_unlink,
+	.symlink		= xfs_vn_symlink,
+	.mkdir			= xfs_vn_mkdir,
+	/*
+	 * Yes, XFS uses the same method for rmdir and unlink.
+	 *
+	 * There are some subtle differences deeper in the code,
+	 * but we use S_ISDIR to check for those.
+	 */
+	.rmdir			= xfs_vn_unlink,
+	.mknod			= xfs_vn_mknod,
+	.rename			= xfs_vn_rename,
+	.get_acl		= xfs_get_acl,
+	.getattr		= xfs_vn_getattr,
+	.setattr		= xfs_vn_setattr,
+	.setxattr		= generic_setxattr,
+	.getxattr		= generic_getxattr,
+	.removexattr		= generic_removexattr,
+	.listxattr		= xfs_vn_listxattr,
+};
+
+static const struct inode_operations xfs_symlink_inode_operations = {
+	.readlink		= generic_readlink,
+	.follow_link		= xfs_vn_follow_link,
+	.put_link		= xfs_vn_put_link,
+	.get_acl		= xfs_get_acl,
+	.getattr		= xfs_vn_getattr,
+	.setattr		= xfs_vn_setattr,
+	.setxattr		= generic_setxattr,
+	.getxattr		= generic_getxattr,
+	.removexattr		= generic_removexattr,
+	.listxattr		= xfs_vn_listxattr,
+};
+
+STATIC void
+xfs_diflags_to_iflags(
+	struct inode		*inode,
+	struct xfs_inode	*ip)
+{
+	if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+		inode->i_flags |= S_IMMUTABLE;
+	else
+		inode->i_flags &= ~S_IMMUTABLE;
+	if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+		inode->i_flags |= S_APPEND;
+	else
+		inode->i_flags &= ~S_APPEND;
+	if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+		inode->i_flags |= S_SYNC;
+	else
+		inode->i_flags &= ~S_SYNC;
+	if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+		inode->i_flags |= S_NOATIME;
+	else
+		inode->i_flags &= ~S_NOATIME;
+}
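
xfs_diflags_to_iflags() keeps the VFS flag bits in sync with the on-disk di_flags via an if/else chain. Not what the patch proposes, but the same translation is often written table-driven, which scales better as flags accumulate; a hypothetical sketch with stand-in constants rather than the real XFS_DIFLAG_*/S_* values:

#include <stddef.h>
#include <stdint.h>

#define DIFLAG_IMMUTABLE	0x1	/* stand-ins, not kernel values */
#define DIFLAG_APPEND		0x2
#define IFLAG_IMMUTABLE		0x10
#define IFLAG_APPEND		0x20

static const struct { uint32_t di; uint32_t vfs; } flag_map[] = {
	{ DIFLAG_IMMUTABLE,	IFLAG_IMMUTABLE },
	{ DIFLAG_APPEND,	IFLAG_APPEND },
};

/* set or clear each VFS bit according to its on-disk counterpart */
static uint32_t diflags_to_iflags(uint32_t di_flags, uint32_t i_flags)
{
	size_t i;

	for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
		if (di_flags & flag_map[i].di)
			i_flags |= flag_map[i].vfs;
		else
			i_flags &= ~flag_map[i].vfs;
	}
	return i_flags;
}

The explicit chain in the patch has one advantage the table lacks: each branch is grep-able by flag name, which is the usual kernel preference.

+
+/*
+ * Initialize the Linux inode, set up the operation vectors and
+ * unlock the inode.
+ *
+ * When reading existing inodes from disk this is called directly
+ * from xfs_iget, when creating a new inode it is called from
+ * xfs_ialloc after setting up the inode.
+ *
+ * We are always called with an uninitialised linux inode here.
+ * We need to initialise the necessary fields and take a reference
+ * on it.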
+ */ +void +xfs_setup_inode( + struct xfs_inode *ip) +{ + struct inode *inode = &ip->i_vnode; + + inode->i_ino = ip->i_ino; + inode->i_state = I_NEW; + + inode_sb_list_add(inode); + /* make the inode look hashed for the writeback code */ + hlist_add_fake(&inode->i_hash); + + inode->i_mode = ip->i_d.di_mode; + inode->i_nlink = ip->i_d.di_nlink; + inode->i_uid = ip->i_d.di_uid; + inode->i_gid = ip->i_d.di_gid; + + switch (inode->i_mode & S_IFMT) { + case S_IFBLK: + case S_IFCHR: + inode->i_rdev = + MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, + sysv_minor(ip->i_df.if_u2.if_rdev)); + break; + default: + inode->i_rdev = 0; + break; + } + + inode->i_generation = ip->i_d.di_gen; + i_size_write(inode, ip->i_d.di_size); + inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; + inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; + inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; + inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; + inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; + inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; + xfs_diflags_to_iflags(inode, ip); + + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + inode->i_op = &xfs_inode_operations; + inode->i_fop = &xfs_file_operations; + inode->i_mapping->a_ops = &xfs_address_space_operations; + break; + case S_IFDIR: + if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) + inode->i_op = &xfs_dir_ci_inode_operations; + else + inode->i_op = &xfs_dir_inode_operations; + inode->i_fop = &xfs_dir_file_operations; + break; + case S_IFLNK: + inode->i_op = &xfs_symlink_inode_operations; + if (!(ip->i_df.if_flags & XFS_IFINLINE)) + inode->i_mapping->a_ops = &xfs_address_space_operations; + break; + default: + inode->i_op = &xfs_inode_operations; + init_special_inode(inode, inode->i_mode, inode->i_rdev); + break; + } + + /* + * If there is no attribute fork no ACL can exist on this inode, + * and it can't have any file capabilities attached to it either. + */ + if (!XFS_IFORK_Q(ip)) { + inode_has_no_xattr(inode); + cache_no_acl(inode); + } + + xfs_iflags_clear(ip, XFS_INEW); + barrier(); + + unlock_new_inode(inode); +} diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h new file mode 100644 index 0000000..ef41c92 --- /dev/null +++ b/fs/xfs/xfs_iops.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_IOPS_H__ +#define __XFS_IOPS_H__ + +struct xfs_inode; + +extern const struct file_operations xfs_file_operations; +extern const struct file_operations xfs_dir_file_operations; + +extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); + +extern void xfs_setup_inode(struct xfs_inode *); + +#endif /* __XFS_IOPS_H__ */ diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h new file mode 100644 index 0000000..1e8a45e --- /dev/null +++ b/fs/xfs/xfs_linux.h @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. 
+ * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_LINUX__ +#define __XFS_LINUX__ + +#include + +/* + * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits. + * XFS_BIG_INUMS requires XFS_BIG_BLKNOS to be set. + */ +#if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) +# define XFS_BIG_BLKNOS 1 +# define XFS_BIG_INUMS 1 +#else +# define XFS_BIG_BLKNOS 0 +# define XFS_BIG_INUMS 0 +#endif + +#include "xfs_types.h" + +#include "kmem.h" +#include "mrlock.h" +#include "time.h" +#include "uuid.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "xfs_vnode.h" +#include "xfs_stats.h" +#include "xfs_sysctl.h" +#include "xfs_iops.h" +#include "xfs_aops.h" +#include "xfs_super.h" +#include "xfs_buf.h" +#include "xfs_message.h" + +#ifdef __BIG_ENDIAN +#define XFS_NATIVE_HOST 1 +#else +#undef XFS_NATIVE_HOST +#endif + +/* + * Feature macros (disable/enable) + */ +#ifdef CONFIG_SMP +#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +#else +#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ +#endif + +#define irix_sgid_inherit xfs_params.sgid_inherit.val +#define irix_symlink_mode xfs_params.symlink_mode.val +#define xfs_panic_mask xfs_params.panic_mask.val +#define xfs_error_level xfs_params.error_level.val +#define xfs_syncd_centisecs xfs_params.syncd_timer.val +#define xfs_stats_clear xfs_params.stats_clear.val +#define xfs_inherit_sync xfs_params.inherit_sync.val +#define xfs_inherit_nodump xfs_params.inherit_nodump.val +#define xfs_inherit_noatime xfs_params.inherit_noatim.val +#define xfs_buf_timer_centisecs xfs_params.xfs_buf_timer.val +#define xfs_buf_age_centisecs xfs_params.xfs_buf_age.val +#define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val +#define xfs_rotorstep xfs_params.rotorstep.val +#define xfs_inherit_nodefrag xfs_params.inherit_nodfrg.val +#define xfs_fstrm_centisecs xfs_params.fstrm_timer.val + +#define current_cpu() (raw_smp_processor_id()) +#define current_pid() (current->pid) +#define current_test_flags(f) (current->flags & (f)) +#define current_set_flags_nested(sp, f) \ + (*(sp) = current->flags, current->flags |= (f)) +#define current_clear_flags_nested(sp, f) \ + (*(sp) = current->flags, current->flags &= ~(f)) +#define current_restore_flags_nested(sp, f) \ + (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) + +#define spinlock_destroy(lock) + +#define NBBY 8 /* number of bits per byte */ + +/* + * Size of block device i/o is parameterized here. + * Currently the system supports page-sized i/o. 
+ */
+#define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT
+#define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT)
+/* number of BB's per block device block */
+#define BLKDEV_BB BTOBB(BLKDEV_IOSIZE)
+
+#define ENOATTR ENODATA /* Attribute not found */
+#define EWRONGFS EINVAL /* Mount with wrong filesystem type */
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
+#define SYNCHRONIZE() barrier()
+#define __return_address __builtin_return_address(0)
+
+#define XFS_PROJID_DEFAULT 0
+#define MAXPATHLEN 1024
+
+#define MIN(a,b) (min(a,b))
+#define MAX(a,b) (max(a,b))
+#define howmany(x, y) (((x)+((y)-1))/(y))
+
+/*
+ * Various platform dependent calls that don't fit anywhere else
+ */
+#define xfs_sort(a,n,s,fn) sort(a,n,s,fn,NULL)
+#define xfs_stack_trace() dump_stack()
+
+
+/* Move the kernel do_div definition off to one side */
+
+#if defined __i386__
+/* For ia32 we need to pull some tricks to avoid linking with libgcc.a */
+static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+{
+ __u32 mod;
+
+ switch (n) {
+ case 4:
+ mod = *(__u32 *)a % b;
+ *(__u32 *)a = *(__u32 *)a / b;
+ return mod;
+ case 8:
+ {
+ unsigned long __upper, __low, __high, __mod;
+ __u64 c = *(__u64 *)a;
+ __upper = __high = c >> 32;
+ __low = c;
+ if (__high) {
+ __upper = __high % (b);
+ __high = __high / (b);
+ }
+ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+ asm("":"=A" (c):"a" (__low),"d" (__high));
+ *(__u64 *)a = c;
+ return __mod;
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+{
+ switch (n) {
+ case 4:
+ return *(__u32 *)a % b;
+ case 8:
+ {
+ unsigned long __upper, __low, __high, __mod;
+ __u64 c = *(__u64 *)a;
+ __upper = __high = c >> 32;
+ __low = c;
+ if (__high) {
+ __upper = __high % (b);
+ __high = __high / (b);
+ }
+ asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+ asm("":"=A" (c):"a" (__low),"d" (__high));
+ return __mod;
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+#else
+static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+{
+ __u32 mod;
+
+ switch (n) {
+ case 4:
+ mod = *(__u32 *)a % b;
+ *(__u32 *)a = *(__u32 *)a / b;
+ return mod;
+ case 8:
+ mod = do_div(*(__u64 *)a, b);
+ return mod;
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+{
+ switch (n) {
+ case 4:
+ return *(__u32 *)a % b;
+ case 8:
+ {
+ __u64 c = *(__u64 *)a;
+ return do_div(c, b);
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+#endif
+
+#undef do_div
+#define do_div(a, b) xfs_do_div(&(a), (b), sizeof(a))
+#define do_mod(a, b) xfs_do_mod(&(a), (b), sizeof(a))
+
+static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
+{
+ x += y - 1;
+ do_div(x, y);
+ return(x * y);
+}
+
+static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+{
+ x += y - 1;
+ do_div(x, y);
+ return x;
+}
+
+/* ARM old ABI has some weird alignment/padding */
+#if defined(__arm__) && !defined(__ARM_EABI__)
+#define __arch_pack __attribute__((packed))
+#else
+#define __arch_pack
+#endif
+
+#define ASSERT_ALWAYS(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+#ifndef DEBUG
+#define ASSERT(expr) ((void)0)
+
+#ifndef STATIC
+# define STATIC static noinline
+#endif
+
+#else /* DEBUG */
+
+#define ASSERT(expr) \
+ (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+#ifndef STATIC
+# define STATIC noinline
+#endif
+
+#endif /* DEBUG */
+
+#endif /* __XFS_LINUX__ */
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
new file mode 100644
index 0000000..bd672de
--- /dev/null
+++ b/fs/xfs/xfs_message.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2011 Red Hat, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_types.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_mount.h" + +/* + * XFS logging functions + */ +static void +__xfs_printk( + const char *level, + const struct xfs_mount *mp, + struct va_format *vaf) +{ + if (mp && mp->m_fsname) { + printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); + return; + } + printk("%sXFS: %pV\n", level, vaf); +} + +#define define_xfs_printk_level(func, kern_level) \ +void func(const struct xfs_mount *mp, const char *fmt, ...) \ +{ \ + struct va_format vaf; \ + va_list args; \ + \ + va_start(args, fmt); \ + \ + vaf.fmt = fmt; \ + vaf.va = &args; \ + \ + __xfs_printk(kern_level, mp, &vaf); \ + va_end(args); \ +} \ + +define_xfs_printk_level(xfs_emerg, KERN_EMERG); +define_xfs_printk_level(xfs_alert, KERN_ALERT); +define_xfs_printk_level(xfs_crit, KERN_CRIT); +define_xfs_printk_level(xfs_err, KERN_ERR); +define_xfs_printk_level(xfs_warn, KERN_WARNING); +define_xfs_printk_level(xfs_notice, KERN_NOTICE); +define_xfs_printk_level(xfs_info, KERN_INFO); +#ifdef DEBUG +define_xfs_printk_level(xfs_debug, KERN_DEBUG); +#endif + +void +xfs_alert_tag( + const struct xfs_mount *mp, + int panic_tag, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + int do_panic = 0; + + if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { + xfs_alert(mp, "Transforming an alert into a BUG."); + do_panic = 1; + } + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + __xfs_printk(KERN_ALERT, mp, &vaf); + va_end(args); + + BUG_ON(do_panic); +} + +void +assfail(char *expr, char *file, int line) +{ + xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d", + expr, file, line); + BUG(); +} + +void +xfs_hex_dump(void *p, int length) +{ + print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); +} diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h new file mode 100644 index 0000000..7fb7ea0 --- /dev/null +++ b/fs/xfs/xfs_message.h @@ -0,0 +1,39 @@ +#ifndef __XFS_MESSAGE_H +#define __XFS_MESSAGE_H 1 + +struct xfs_mount; + +extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern void xfs_alert_tag(const struct xfs_mount *mp, int tag, + const char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); +extern void xfs_crit(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern void xfs_err(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +extern void xfs_info(const struct xfs_mount *mp, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); + +#ifdef DEBUG +extern void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) 
+ __attribute__ ((format (printf, 2, 3))); +#else +static inline void +__attribute__ ((format (printf, 2, 3))) +xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) +{ +} +#endif + +extern void assfail(char *expr, char *f, int l); + +extern void xfs_hex_dump(void *p, int length); + +#endif /* __XFS_MESSAGE_H */ diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c new file mode 100644 index 0000000..9a0aa76 --- /dev/null +++ b/fs/xfs/xfs_qm.c @@ -0,0 +1,2416 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_bmap.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_trans_space.h" +#include "xfs_utils.h" +#include "xfs_qm.h" +#include "xfs_trace.h" + +/* + * The global quota manager. There is only one of these for the entire + * system, _not_ one per file system. XQM keeps track of the overall + * quota functionality, including maintaining the freelist and hash + * tables of dquots. + */ +struct mutex xfs_Gqm_lock; +struct xfs_qm *xfs_Gqm; +uint ndquot; + +kmem_zone_t *qm_dqzone; +kmem_zone_t *qm_dqtrxzone; + +STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int); +STATIC void xfs_qm_list_destroy(xfs_dqlist_t *); + +STATIC int xfs_qm_init_quotainos(xfs_mount_t *); +STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); +STATIC int xfs_qm_shake(struct shrinker *, struct shrink_control *); + +static struct shrinker xfs_qm_shaker = { + .shrink = xfs_qm_shake, + .seeks = DEFAULT_SEEKS, +}; + +/* + * Initialize the XQM structure. + * Note that there is not one quota manager per file system. + */ +STATIC struct xfs_qm * +xfs_Gqm_init(void) +{ + xfs_dqhash_t *udqhash, *gdqhash; + xfs_qm_t *xqm; + size_t hsize; + uint i; + + /* + * Initialize the dquot hash tables. 
+ */ + udqhash = kmem_zalloc_greedy(&hsize, + XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t), + XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t)); + if (!udqhash) + goto out; + + gdqhash = kmem_zalloc_large(hsize); + if (!gdqhash) + goto out_free_udqhash; + + hsize /= sizeof(xfs_dqhash_t); + ndquot = hsize << 8; + + xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); + xqm->qm_dqhashmask = hsize - 1; + xqm->qm_usr_dqhtable = udqhash; + xqm->qm_grp_dqhtable = gdqhash; + ASSERT(xqm->qm_usr_dqhtable != NULL); + ASSERT(xqm->qm_grp_dqhtable != NULL); + + for (i = 0; i < hsize; i++) { + xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i); + xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i); + } + + /* + * Freelist of all dquots of all file systems + */ + INIT_LIST_HEAD(&xqm->qm_dqfrlist); + xqm->qm_dqfrlist_cnt = 0; + mutex_init(&xqm->qm_dqfrlist_lock); + + /* + * dquot zone. we register our own low-memory callback. + */ + if (!qm_dqzone) { + xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t), + "xfs_dquots"); + qm_dqzone = xqm->qm_dqzone; + } else + xqm->qm_dqzone = qm_dqzone; + + register_shrinker(&xfs_qm_shaker); + + /* + * The t_dqinfo portion of transactions. + */ + if (!qm_dqtrxzone) { + xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t), + "xfs_dqtrx"); + qm_dqtrxzone = xqm->qm_dqtrxzone; + } else + xqm->qm_dqtrxzone = qm_dqtrxzone; + + atomic_set(&xqm->qm_totaldquots, 0); + xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; + xqm->qm_nrefs = 0; + return xqm; + + out_free_udqhash: + kmem_free_large(udqhash); + out: + return NULL; +} + +/* + * Destroy the global quota manager when its reference count goes to zero. + */ +STATIC void +xfs_qm_destroy( + struct xfs_qm *xqm) +{ + struct xfs_dquot *dqp, *n; + int hsize, i; + + ASSERT(xqm != NULL); + ASSERT(xqm->qm_nrefs == 0); + unregister_shrinker(&xfs_qm_shaker); + hsize = xqm->qm_dqhashmask + 1; + for (i = 0; i < hsize; i++) { + xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); + xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i])); + } + kmem_free_large(xqm->qm_usr_dqhtable); + kmem_free_large(xqm->qm_grp_dqhtable); + xqm->qm_usr_dqhtable = NULL; + xqm->qm_grp_dqhtable = NULL; + xqm->qm_dqhashmask = 0; + + /* frlist cleanup */ + mutex_lock(&xqm->qm_dqfrlist_lock); + list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) { + xfs_dqlock(dqp); + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + xfs_dqunlock(dqp); + xfs_qm_dqdestroy(dqp); + } + mutex_unlock(&xqm->qm_dqfrlist_lock); + mutex_destroy(&xqm->qm_dqfrlist_lock); + kmem_free(xqm); +} + +/* + * Called at mount time to let XQM know that another file system is + * starting quotas. This isn't crucial information as the individual mount + * structures are pretty independent, but it helps the XQM keep a + * global view of what's going on. + */ +/* ARGSUSED */ +STATIC int +xfs_qm_hold_quotafs_ref( + struct xfs_mount *mp) +{ + /* + * Need to lock the xfs_Gqm structure for things like this. For example, + * the structure could disappear between the entry to this routine and + * a HOLD operation if not locked. + */ + mutex_lock(&xfs_Gqm_lock); + + if (!xfs_Gqm) { + xfs_Gqm = xfs_Gqm_init(); + if (!xfs_Gqm) { + mutex_unlock(&xfs_Gqm_lock); + return ENOMEM; + } + } + + /* + * We can keep a list of all filesystems with quotas mounted for + * debugging and statistical purposes, but ... + * Just take a reference and get out. 
+ */
+ xfs_Gqm->qm_nrefs++;
+ mutex_unlock(&xfs_Gqm_lock);
+
+ return 0;
+}
+
+
+/*
+ * Release the reference that a filesystem took at mount time,
+ * so that we know when we need to destroy the entire quota manager.
+ */
+/* ARGSUSED */
+STATIC void
+xfs_qm_rele_quotafs_ref(
+ struct xfs_mount *mp)
+{
+ xfs_dquot_t *dqp, *n;
+
+ ASSERT(xfs_Gqm);
+ ASSERT(xfs_Gqm->qm_nrefs > 0);
+
+ /*
+ * Go thru the freelist and destroy all inactive dquots.
+ */
+ mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
+
+ list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
+ xfs_dqlock(dqp);
+ if (dqp->dq_flags & XFS_DQ_INACTIVE) {
+ ASSERT(dqp->q_mount == NULL);
+ ASSERT(! XFS_DQ_IS_DIRTY(dqp));
+ ASSERT(list_empty(&dqp->q_hashlist));
+ ASSERT(list_empty(&dqp->q_mplist));
+ list_del_init(&dqp->q_freelist);
+ xfs_Gqm->qm_dqfrlist_cnt--;
+ xfs_dqunlock(dqp);
+ xfs_qm_dqdestroy(dqp);
+ } else {
+ xfs_dqunlock(dqp);
+ }
+ }
+ mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+
+ /*
+ * Destroy the entire XQM. If somebody mounts with quotaon, this'll
+ * be restarted.
+ */
+ mutex_lock(&xfs_Gqm_lock);
+ if (--xfs_Gqm->qm_nrefs == 0) {
+ xfs_qm_destroy(xfs_Gqm);
+ xfs_Gqm = NULL;
+ }
+ mutex_unlock(&xfs_Gqm_lock);
+}
+
+/*
+ * Just destroy the quotainfo structure.
+ */
+void
+xfs_qm_unmount(
+ struct xfs_mount *mp)
+{
+ if (mp->m_quotainfo) {
+ xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
+ xfs_qm_destroy_quotainfo(mp);
+ }
+}
+
+
+/*
+ * This is called from xfs_mountfs to start quotas and initialize all
+ * necessary data structures like quotainfo. This is also responsible for
+ * running a quotacheck as necessary. We are guaranteed that the superblock
+ * is consistently read in at this point.
+ *
+ * If we fail here, the mount will continue with quota turned off. We don't
+ * need to indicate success or failure at all.
+ */
+void
+xfs_qm_mount_quotas(
+ xfs_mount_t *mp)
+{
+ int error = 0;
+ uint sbf;
+
+ /*
+ * If quotas on realtime volumes are not supported, we disable
+ * quotas immediately.
+ */
+ if (mp->m_sb.sb_rextents) {
+ xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
+ mp->m_qflags = 0;
+ goto write_changes;
+ }
+
+ ASSERT(XFS_IS_QUOTA_RUNNING(mp));
+
+ /*
+ * Allocate the quotainfo structure inside the mount struct, and
+ * create quotainode(s), and change/rev superblock if necessary.
+ */
+ error = xfs_qm_init_quotainfo(mp);
+ if (error) {
+ /*
+ * We must turn off quotas.
+ */
+ ASSERT(mp->m_quotainfo == NULL);
+ mp->m_qflags = 0;
+ goto write_changes;
+ }
+ /*
+ * If any of the quotas are not consistent, do a quotacheck.
+ */
+ if (XFS_QM_NEED_QUOTACHECK(mp)) {
+ error = xfs_qm_quotacheck(mp);
+ if (error) {
+ /* Quotacheck failed and disabled quotas. */
+ return;
+ }
+ }
+ /*
+ * If one type of quotas is off, then it will lose its
+ * quotachecked status, since we won't be doing accounting for
+ * that type anymore.
+ */
+ if (!XFS_IS_UQUOTA_ON(mp))
+ mp->m_qflags &= ~XFS_UQUOTA_CHKD;
+ if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
+ mp->m_qflags &= ~XFS_OQUOTA_CHKD;
+
+ write_changes:
+ /*
+ * We actually don't have to acquire the m_sb_lock at all.
+ * This can only be called from mount, and that's single threaded. XXX
+ */
+ spin_lock(&mp->m_sb_lock);
+ sbf = mp->m_sb.sb_qflags;
+ mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
+ spin_unlock(&mp->m_sb_lock);
+
+ if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
+ if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
+ /*
+ * We could only have been turning quotas off.
+ * We aren't in very good shape actually because + * the incore structures are convinced that quotas are + * off, but the on disk superblock doesn't know that ! + */ + ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); + xfs_alert(mp, "%s: Superblock update failed!", + __func__); + } + } + + if (error) { + xfs_warn(mp, "Failed to initialize disk quotas."); + return; + } +} + +/* + * Called from the vfsops layer. + */ +void +xfs_qm_unmount_quotas( + xfs_mount_t *mp) +{ + /* + * Release the dquots that root inode, et al might be holding, + * before we flush quotas and blow away the quotainfo structure. + */ + ASSERT(mp->m_rootip); + xfs_qm_dqdetach(mp->m_rootip); + if (mp->m_rbmip) + xfs_qm_dqdetach(mp->m_rbmip); + if (mp->m_rsumip) + xfs_qm_dqdetach(mp->m_rsumip); + + /* + * Release the quota inodes. + */ + if (mp->m_quotainfo) { + if (mp->m_quotainfo->qi_uquotaip) { + IRELE(mp->m_quotainfo->qi_uquotaip); + mp->m_quotainfo->qi_uquotaip = NULL; + } + if (mp->m_quotainfo->qi_gquotaip) { + IRELE(mp->m_quotainfo->qi_gquotaip); + mp->m_quotainfo->qi_gquotaip = NULL; + } + } +} + +/* + * Flush all dquots of the given file system to disk. The dquots are + * _not_ purged from memory here, just their data written to disk. + */ +STATIC int +xfs_qm_dqflush_all( + struct xfs_mount *mp, + int sync_mode) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + int recl; + struct xfs_dquot *dqp; + int error; + + if (!q) + return 0; +again: + mutex_lock(&q->qi_dqlist_lock); + list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { + xfs_dqlock(dqp); + if (! XFS_DQ_IS_DIRTY(dqp)) { + xfs_dqunlock(dqp); + continue; + } + + /* XXX a sentinel would be better */ + recl = q->qi_dqreclaims; + if (!xfs_dqflock_nowait(dqp)) { + /* + * If we can't grab the flush lock then check + * to see if the dquot has been flushed delayed + * write. If so, grab its buffer and send it + * out immediately. We'll be able to acquire + * the flush lock when the I/O completes. + */ + xfs_qm_dqflock_pushbuf_wait(dqp); + } + /* + * Let go of the mplist lock. We don't want to hold it + * across a disk write. + */ + mutex_unlock(&q->qi_dqlist_lock); + error = xfs_qm_dqflush(dqp, sync_mode); + xfs_dqunlock(dqp); + if (error) + return error; + + mutex_lock(&q->qi_dqlist_lock); + if (recl != q->qi_dqreclaims) { + mutex_unlock(&q->qi_dqlist_lock); + /* XXX restart limit */ + goto again; + } + } + + mutex_unlock(&q->qi_dqlist_lock); + /* return ! busy */ + return 0; +} +/* + * Release the group dquot pointers the user dquots may be + * carrying around as a hint. mplist is locked on entry and exit. + */ +STATIC void +xfs_qm_detach_gdquots( + struct xfs_mount *mp) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_dquot *dqp, *gdqp; + int nrecl; + + again: + ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); + list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) { + xfs_dqlock(dqp); + if ((gdqp = dqp->q_gdquot)) { + xfs_dqlock(gdqp); + dqp->q_gdquot = NULL; + } + xfs_dqunlock(dqp); + + if (gdqp) { + /* + * Can't hold the mplist lock across a dqput. + * XXXmust convert to marker based iterations here. + */ + nrecl = q->qi_dqreclaims; + mutex_unlock(&q->qi_dqlist_lock); + xfs_qm_dqput(gdqp); + + mutex_lock(&q->qi_dqlist_lock); + if (nrecl != q->qi_dqreclaims) + goto again; + } + } +} + +/* + * Go through all the incore dquots of this file system and take them + * off the mplist and hashlist, if the dquot type matches the dqtype + * parameter. This is used when turning off quota accounting for + * users and/or groups, as well as when the filesystem is unmounting. 
+ */ +STATIC int +xfs_qm_dqpurge_int( + struct xfs_mount *mp, + uint flags) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_dquot *dqp, *n; + uint dqtype; + int nrecl; + int nmisses; + + if (!q) + return 0; + + dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0; + dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0; + dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0; + + mutex_lock(&q->qi_dqlist_lock); + + /* + * In the first pass through all incore dquots of this filesystem, + * we release the group dquot pointers the user dquots may be + * carrying around as a hint. We need to do this irrespective of + * what's being turned off. + */ + xfs_qm_detach_gdquots(mp); + + again: + nmisses = 0; + ASSERT(mutex_is_locked(&q->qi_dqlist_lock)); + /* + * Try to get rid of all of the unwanted dquots. The idea is to + * get them off mplist and hashlist, but leave them on freelist. + */ + list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) { + /* + * It's OK to look at the type without taking dqlock here. + * We're holding the mplist lock here, and that's needed for + * a dqreclaim. + */ + if ((dqp->dq_flags & dqtype) == 0) + continue; + + if (!mutex_trylock(&dqp->q_hash->qh_lock)) { + nrecl = q->qi_dqreclaims; + mutex_unlock(&q->qi_dqlist_lock); + mutex_lock(&dqp->q_hash->qh_lock); + mutex_lock(&q->qi_dqlist_lock); + + /* + * XXXTheoretically, we can get into a very long + * ping pong game here. + * No one can be adding dquots to the mplist at + * this point, but somebody might be taking things off. + */ + if (nrecl != q->qi_dqreclaims) { + mutex_unlock(&dqp->q_hash->qh_lock); + goto again; + } + } + + /* + * Take the dquot off the mplist and hashlist. It may remain on + * freelist in INACTIVE state. + */ + nmisses += xfs_qm_dqpurge(dqp); + } + mutex_unlock(&q->qi_dqlist_lock); + return nmisses; +} + +int +xfs_qm_dqpurge_all( + xfs_mount_t *mp, + uint flags) +{ + int ndquots; + + /* + * Purge the dquot cache. + * None of the dquots should really be busy at this point. + */ + if (mp->m_quotainfo) { + while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) { + delay(ndquots * 10); + } + } + return 0; +} + +STATIC int +xfs_qm_dqattach_one( + xfs_inode_t *ip, + xfs_dqid_t id, + uint type, + uint doalloc, + xfs_dquot_t *udqhint, /* hint */ + xfs_dquot_t **IO_idqpp) +{ + xfs_dquot_t *dqp; + int error; + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + error = 0; + + /* + * See if we already have it in the inode itself. IO_idqpp is + * &i_udquot or &i_gdquot. This made the code look weird, but + * made the logic a lot simpler. + */ + dqp = *IO_idqpp; + if (dqp) { + trace_xfs_dqattach_found(dqp); + return 0; + } + + /* + * udqhint is the i_udquot field in inode, and is non-NULL only + * when the type arg is group/project. Its purpose is to save a + * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside + * the user dquot. + */ + if (udqhint) { + ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ); + xfs_dqlock(udqhint); + + /* + * No need to take dqlock to look at the id. + * + * The ID can't change until it gets reclaimed, and it won't + * be reclaimed as long as we have a ref from inode and we + * hold the ilock. + */ + dqp = udqhint->q_gdquot; + if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) { + xfs_dqlock(dqp); + XFS_DQHOLD(dqp); + ASSERT(*IO_idqpp == NULL); + *IO_idqpp = dqp; + + xfs_dqunlock(dqp); + xfs_dqunlock(udqhint); + return 0; + } + + /* + * We can't hold a dquot lock when we call the dqget code. 
+ * We'll deadlock in no time, because of (not conforming to) + * lock ordering - the inodelock comes before any dquot lock, + * and we may drop and reacquire the ilock in xfs_qm_dqget(). + */ + xfs_dqunlock(udqhint); + } + + /* + * Find the dquot from somewhere. This bumps the + * reference count of dquot and returns it locked. + * This can return ENOENT if dquot didn't exist on + * disk and we didn't ask it to allocate; + * ESRCH if quotas got turned off suddenly. + */ + error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp); + if (error) + return error; + + trace_xfs_dqattach_get(dqp); + + /* + * dqget may have dropped and re-acquired the ilock, but it guarantees + * that the dquot returned is the one that should go in the inode. + */ + *IO_idqpp = dqp; + xfs_dqunlock(dqp); + return 0; +} + + +/* + * Given a udquot and gdquot, attach a ptr to the group dquot in the + * udquot as a hint for future lookups. The idea sounds simple, but the + * execution isn't, because the udquot might have a group dquot attached + * already and getting rid of that gets us into lock ordering constraints. + * The process is complicated more by the fact that the dquots may or may not + * be locked on entry. + */ +STATIC void +xfs_qm_dqattach_grouphint( + xfs_dquot_t *udq, + xfs_dquot_t *gdq) +{ + xfs_dquot_t *tmp; + + xfs_dqlock(udq); + + if ((tmp = udq->q_gdquot)) { + if (tmp == gdq) { + xfs_dqunlock(udq); + return; + } + + udq->q_gdquot = NULL; + /* + * We can't keep any dqlocks when calling dqrele, + * because the freelist lock comes before dqlocks. + */ + xfs_dqunlock(udq); + /* + * we took a hard reference once upon a time in dqget, + * so give it back when the udquot no longer points at it + * dqput() does the unlocking of the dquot. + */ + xfs_qm_dqrele(tmp); + + xfs_dqlock(udq); + xfs_dqlock(gdq); + + } else { + ASSERT(XFS_DQ_IS_LOCKED(udq)); + xfs_dqlock(gdq); + } + + ASSERT(XFS_DQ_IS_LOCKED(udq)); + ASSERT(XFS_DQ_IS_LOCKED(gdq)); + /* + * Somebody could have attached a gdquot here, + * when we dropped the uqlock. If so, just do nothing. + */ + if (udq->q_gdquot == NULL) { + XFS_DQHOLD(gdq); + udq->q_gdquot = gdq; + } + + xfs_dqunlock(gdq); + xfs_dqunlock(udq); +} + + +/* + * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON + * into account. + * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed. + * Inode may get unlocked and relocked in here, and the caller must deal with + * the consequences. + */ +int +xfs_qm_dqattach_locked( + xfs_inode_t *ip, + uint flags) +{ + xfs_mount_t *mp = ip->i_mount; + uint nquotas = 0; + int error = 0; + + if (!XFS_IS_QUOTA_RUNNING(mp) || + !XFS_IS_QUOTA_ON(mp) || + !XFS_NOT_DQATTACHED(mp, ip) || + ip->i_ino == mp->m_sb.sb_uquotino || + ip->i_ino == mp->m_sb.sb_gquotino) + return 0; + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + + if (XFS_IS_UQUOTA_ON(mp)) { + error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER, + flags & XFS_QMOPT_DQALLOC, + NULL, &ip->i_udquot); + if (error) + goto done; + nquotas++; + } + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + if (XFS_IS_OQUOTA_ON(mp)) { + error = XFS_IS_GQUOTA_ON(mp) ? + xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, + flags & XFS_QMOPT_DQALLOC, + ip->i_udquot, &ip->i_gdquot) : + xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ, + flags & XFS_QMOPT_DQALLOC, + ip->i_udquot, &ip->i_gdquot); + /* + * Don't worry about the udquot that we may have + * attached above. It'll get detached, if not already. 
+ */
+ if (error)
+ goto done;
+ nquotas++;
+ }
+
+ /*
+ * Attach this group quota to the user quota as a hint.
+ * This WON'T, in general, result in a thrash.
+ */
+ if (nquotas == 2) {
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(ip->i_udquot);
+ ASSERT(ip->i_gdquot);
+
+ /*
+ * We may or may not have the i_udquot locked at this point,
+ * but this check is OK since we don't depend on the i_gdquot to
+ * be accurate 100% all the time. It is just a hint, and this
+ * will succeed in general.
+ */
+ if (ip->i_udquot->q_gdquot == ip->i_gdquot)
+ goto done;
+ /*
+ * Attach i_gdquot to the gdquot hint inside the i_udquot.
+ */
+ xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
+ }
+
+ done:
+#ifdef DEBUG
+ if (!error) {
+ if (XFS_IS_UQUOTA_ON(mp))
+ ASSERT(ip->i_udquot);
+ if (XFS_IS_OQUOTA_ON(mp))
+ ASSERT(ip->i_gdquot);
+ }
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+#endif
+ return error;
+}
+
+int
+xfs_qm_dqattach(
+ struct xfs_inode *ip,
+ uint flags)
+{
+ int error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_qm_dqattach_locked(ip, flags);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ return error;
+}
+
+/*
+ * Release dquots (and their references) if any.
+ * The inode should be locked EXCL except when this is called by
+ * xfs_ireclaim.
+ */
+void
+xfs_qm_dqdetach(
+ xfs_inode_t *ip)
+{
+ if (!(ip->i_udquot || ip->i_gdquot))
+ return;
+
+ trace_xfs_dquot_dqdetach(ip);
+
+ ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
+ ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
+ if (ip->i_udquot) {
+ xfs_qm_dqrele(ip->i_udquot);
+ ip->i_udquot = NULL;
+ }
+ if (ip->i_gdquot) {
+ xfs_qm_dqrele(ip->i_gdquot);
+ ip->i_gdquot = NULL;
+ }
+}
+
+int
+xfs_qm_sync(
+ struct xfs_mount *mp,
+ int flags)
+{
+ struct xfs_quotainfo *q = mp->m_quotainfo;
+ int recl, restarts;
+ struct xfs_dquot *dqp;
+ int error;
+
+ if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+ return 0;
+
+ restarts = 0;
+
+ again:
+ mutex_lock(&q->qi_dqlist_lock);
+ /*
+ * dqpurge_all() also takes the mplist lock and iterates thru all dquots
+ * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
+ * when we have the mplist lock, we know that dquots will be consistent
+ * as long as we have it locked.
+ */
+ if (!XFS_IS_QUOTA_ON(mp)) {
+ mutex_unlock(&q->qi_dqlist_lock);
+ return 0;
+ }
+ ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+ list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
+ /*
+ * If this is vfs_sync calling, then skip the dquots that
+ * don't 'seem' to be dirty. ie. don't acquire dqlock.
+ * This is very similar to what xfs_sync does with inodes.
+ */
+ if (flags & SYNC_TRYLOCK) {
+ if (!XFS_DQ_IS_DIRTY(dqp))
+ continue;
+ if (!xfs_qm_dqlock_nowait(dqp))
+ continue;
+ } else {
+ xfs_dqlock(dqp);
+ }
+
+ /*
+ * Now, find out for sure if this dquot is dirty or not.
+ */
+ if (! XFS_DQ_IS_DIRTY(dqp)) {
+ xfs_dqunlock(dqp);
+ continue;
+ }
+
+ /* XXX a sentinel would be better */
+ recl = q->qi_dqreclaims;
+ if (!xfs_dqflock_nowait(dqp)) {
+ if (flags & SYNC_TRYLOCK) {
+ xfs_dqunlock(dqp);
+ continue;
+ }
+ /*
+ * If we can't grab the flush lock, the caller
+ * really wanted us to give this our best shot, so
+ * see if we can give a push to the buffer before we wait
+ * on the flush lock. At this point, we know that
+ * even though the dquot is being flushed,
+ * it has (new) dirty data.
+ */
+ xfs_qm_dqflock_pushbuf_wait(dqp);
+ }
+ /*
+ * Let go of the mplist lock.
We don't want to hold it + * across a disk write + */ + mutex_unlock(&q->qi_dqlist_lock); + error = xfs_qm_dqflush(dqp, flags); + xfs_dqunlock(dqp); + if (error && XFS_FORCED_SHUTDOWN(mp)) + return 0; /* Need to prevent umount failure */ + else if (error) + return error; + + mutex_lock(&q->qi_dqlist_lock); + if (recl != q->qi_dqreclaims) { + if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS) + break; + + mutex_unlock(&q->qi_dqlist_lock); + goto again; + } + } + + mutex_unlock(&q->qi_dqlist_lock); + return 0; +} + +/* + * The hash chains and the mplist use the same xfs_dqhash structure as + * their list head, but we can take the mplist qh_lock and one of the + * hash qh_locks at the same time without any problem as they aren't + * related. + */ +static struct lock_class_key xfs_quota_mplist_class; + +/* + * This initializes all the quota information that's kept in the + * mount structure + */ +STATIC int +xfs_qm_init_quotainfo( + xfs_mount_t *mp) +{ + xfs_quotainfo_t *qinf; + int error; + xfs_dquot_t *dqp; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * Tell XQM that we exist as soon as possible. + */ + if ((error = xfs_qm_hold_quotafs_ref(mp))) { + return error; + } + + qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); + + /* + * See if quotainodes are setup, and if not, allocate them, + * and change the superblock accordingly. + */ + if ((error = xfs_qm_init_quotainos(mp))) { + kmem_free(qinf); + mp->m_quotainfo = NULL; + return error; + } + + INIT_LIST_HEAD(&qinf->qi_dqlist); + mutex_init(&qinf->qi_dqlist_lock); + lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class); + + qinf->qi_dqreclaims = 0; + + /* mutex used to serialize quotaoffs */ + mutex_init(&qinf->qi_quotaofflock); + + /* Precalc some constants */ + qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); + ASSERT(qinf->qi_dqchunklen); + qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen); + do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t)); + + mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); + + /* + * We try to get the limits from the superuser's limits fields. + * This is quite hacky, but it is standard quota practice. + * We look at the USR dquot with id == 0 first, but if user quotas + * are not enabled we goto the GRP dquot with id == 0. + * We don't really care to keep separate default limits for user + * and group quotas, at least not at this point. + */ + error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0, + XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : + (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP : + XFS_DQ_PROJ), + XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN, + &dqp); + if (! error) { + xfs_disk_dquot_t *ddqp = &dqp->q_core; + + /* + * The warnings and timers set the grace period given to + * a user or group before he or she can not perform any + * more writing. If it is zero, a default is used. + */ + qinf->qi_btimelimit = ddqp->d_btimer ? + be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT; + qinf->qi_itimelimit = ddqp->d_itimer ? + be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT; + qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ? + be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT; + qinf->qi_bwarnlimit = ddqp->d_bwarns ? + be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT; + qinf->qi_iwarnlimit = ddqp->d_iwarns ? + be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT; + qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ? 
+ be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT; + qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); + qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); + qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); + qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); + qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); + qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); + + /* + * We sent the XFS_QMOPT_DQSUSER flag to dqget because + * we don't want this dquot cached. We haven't done a + * quotacheck yet, and quotacheck doesn't like incore dquots. + */ + xfs_qm_dqdestroy(dqp); + } else { + qinf->qi_btimelimit = XFS_QM_BTIMELIMIT; + qinf->qi_itimelimit = XFS_QM_ITIMELIMIT; + qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT; + qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT; + qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT; + qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT; + } + + return 0; +} + + +/* + * Gets called when unmounting a filesystem or when all quotas get + * turned off. + * This purges the quota inodes, destroys locks and frees itself. + */ +void +xfs_qm_destroy_quotainfo( + xfs_mount_t *mp) +{ + xfs_quotainfo_t *qi; + + qi = mp->m_quotainfo; + ASSERT(qi != NULL); + ASSERT(xfs_Gqm != NULL); + + /* + * Release the reference that XQM kept, so that we know + * when the XQM structure should be freed. We cannot assume + * that xfs_Gqm is non-null after this point. + */ + xfs_qm_rele_quotafs_ref(mp); + + ASSERT(list_empty(&qi->qi_dqlist)); + mutex_destroy(&qi->qi_dqlist_lock); + + if (qi->qi_uquotaip) { + IRELE(qi->qi_uquotaip); + qi->qi_uquotaip = NULL; /* paranoia */ + } + if (qi->qi_gquotaip) { + IRELE(qi->qi_gquotaip); + qi->qi_gquotaip = NULL; + } + mutex_destroy(&qi->qi_quotaofflock); + kmem_free(qi); + mp->m_quotainfo = NULL; +} + + + +/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */ + +/* ARGSUSED */ +STATIC void +xfs_qm_list_init( + xfs_dqlist_t *list, + char *str, + int n) +{ + mutex_init(&list->qh_lock); + INIT_LIST_HEAD(&list->qh_list); + list->qh_version = 0; + list->qh_nelems = 0; +} + +STATIC void +xfs_qm_list_destroy( + xfs_dqlist_t *list) +{ + mutex_destroy(&(list->qh_lock)); +} + +/* + * Create an inode and return with a reference already taken, but unlocked + * This is how we create quota inodes + */ +STATIC int +xfs_qm_qino_alloc( + xfs_mount_t *mp, + xfs_inode_t **ip, + __int64_t sbfields, + uint flags) +{ + xfs_trans_t *tp; + int error; + int committed; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); + if ((error = xfs_trans_reserve(tp, + XFS_QM_QINOCREATE_SPACE_RES(mp), + XFS_CREATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_CREATE_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return error; + } + + error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed); + if (error) { + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT); + return error; + } + + /* + * Make the changes in the superblock, and log those too. + * sbfields arg may contain fields other than *QUOTINO; + * VERSIONNUM for example. 
+ */ + spin_lock(&mp->m_sb_lock); + if (flags & XFS_QMOPT_SBVERSION) { + ASSERT(!xfs_sb_version_hasquota(&mp->m_sb)); + ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) == + (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS)); + + xfs_sb_version_addquota(&mp->m_sb); + mp->m_sb.sb_uquotino = NULLFSINO; + mp->m_sb.sb_gquotino = NULLFSINO; + + /* qflags will get updated _after_ quotacheck */ + mp->m_sb.sb_qflags = 0; + } + if (flags & XFS_QMOPT_UQUOTA) + mp->m_sb.sb_uquotino = (*ip)->i_ino; + else + mp->m_sb.sb_gquotino = (*ip)->i_ino; + spin_unlock(&mp->m_sb_lock); + xfs_mod_sb(tp, sbfields); + + if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { + xfs_alert(mp, "%s failed (error %d)!", __func__, error); + return error; + } + return 0; +} + + +STATIC void +xfs_qm_reset_dqcounts( + xfs_mount_t *mp, + xfs_buf_t *bp, + xfs_dqid_t id, + uint type) +{ + xfs_disk_dquot_t *ddq; + int j; + + trace_xfs_reset_dqcounts(bp, _RET_IP_); + + /* + * Reset all counters and timers. They'll be + * started afresh by xfs_qm_quotacheck. + */ +#ifdef DEBUG + j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB); + do_div(j, sizeof(xfs_dqblk_t)); + ASSERT(mp->m_quotainfo->qi_dqperchunk == j); +#endif + ddq = bp->b_addr; + for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) { + /* + * Do a sanity check, and if needed, repair the dqblk. Don't + * output any warnings because it's perfectly possible to + * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. + */ + (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, + "xfs_quotacheck"); + ddq->d_bcount = 0; + ddq->d_icount = 0; + ddq->d_rtbcount = 0; + ddq->d_btimer = 0; + ddq->d_itimer = 0; + ddq->d_rtbtimer = 0; + ddq->d_bwarns = 0; + ddq->d_iwarns = 0; + ddq->d_rtbwarns = 0; + ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1); + } +} + +STATIC int +xfs_qm_dqiter_bufs( + xfs_mount_t *mp, + xfs_dqid_t firstid, + xfs_fsblock_t bno, + xfs_filblks_t blkcnt, + uint flags) +{ + xfs_buf_t *bp; + int error; + int type; + + ASSERT(blkcnt > 0); + type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : + (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP); + error = 0; + + /* + * Blkcnt arg can be a very big number, and might even be + * larger than the log itself. So, we have to break it up into + * manageable-sized transactions. + * Note that we don't start a permanent transaction here; we might + * not be able to get a log reservation for the whole thing up front, + * and we don't really care to either, because we just discard + * everything if we were to crash in the middle of this loop. + */ + while (blkcnt--) { + error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, + XFS_FSB_TO_DADDR(mp, bno), + mp->m_quotainfo->qi_dqchunklen, 0, &bp); + if (error) + break; + + xfs_qm_reset_dqcounts(mp, bp, firstid, type); + xfs_bdwrite(mp, bp); + /* + * goto the next block. + */ + bno++; + firstid += mp->m_quotainfo->qi_dqperchunk; + } + return error; +} + +/* + * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a + * caller supplied function for every chunk of dquots that we find. 
+ */
+STATIC int
+xfs_qm_dqiterate(
+ xfs_mount_t *mp,
+ xfs_inode_t *qip,
+ uint flags)
+{
+ xfs_bmbt_irec_t *map;
+ int i, nmaps; /* number of map entries */
+ int error; /* return value */
+ xfs_fileoff_t lblkno;
+ xfs_filblks_t maxlblkcnt;
+ xfs_dqid_t firstid;
+ xfs_fsblock_t rablkno;
+ xfs_filblks_t rablkcnt;
+
+ error = 0;
+ /*
+ * This looks racy, but we can't keep an inode lock across a
+ * trans_reserve. But, this gets called during quotacheck, and that
+ * happens only at mount time which is single threaded.
+ */
+ if (qip->i_d.di_nblocks == 0)
+ return 0;
+
+ map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
+
+ lblkno = 0;
+ maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+ do {
+ nmaps = XFS_DQITER_MAP_SIZE;
+ /*
+ * We aren't changing the inode itself. Just changing
+ * some of its data. No new blocks are added here, and
+ * the inode is never added to the transaction.
+ */
+ xfs_ilock(qip, XFS_ILOCK_SHARED);
+ error = xfs_bmapi(NULL, qip, lblkno,
+ maxlblkcnt - lblkno,
+ XFS_BMAPI_METADATA,
+ NULL,
+ 0, map, &nmaps, NULL);
+ xfs_iunlock(qip, XFS_ILOCK_SHARED);
+ if (error)
+ break;
+
+ ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
+ for (i = 0; i < nmaps; i++) {
+ ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
+ ASSERT(map[i].br_blockcount);
+
+
+ lblkno += map[i].br_blockcount;
+
+ if (map[i].br_startblock == HOLESTARTBLOCK)
+ continue;
+
+ firstid = (xfs_dqid_t) map[i].br_startoff *
+ mp->m_quotainfo->qi_dqperchunk;
+ /*
+ * Do a read-ahead on the next extent.
+ */
+ if ((i+1 < nmaps) &&
+ (map[i+1].br_startblock != HOLESTARTBLOCK)) {
+ rablkcnt = map[i+1].br_blockcount;
+ rablkno = map[i+1].br_startblock;
+ while (rablkcnt--) {
+ xfs_buf_readahead(mp->m_ddev_targp,
+ XFS_FSB_TO_DADDR(mp, rablkno),
+ mp->m_quotainfo->qi_dqchunklen);
+ rablkno++;
+ }
+ }
+ /*
+ * Iterate thru all the blks in the extent and
+ * reset the counters of all the dquots inside them.
+ */
+ if ((error = xfs_qm_dqiter_bufs(mp,
+ firstid,
+ map[i].br_startblock,
+ map[i].br_blockcount,
+ flags))) {
+ break;
+ }
+ }
+
+ if (error)
+ break;
+ } while (nmaps > 0);
+
+ kmem_free(map);
+
+ return error;
+}
+
+/*
+ * Called by dqusage_adjust in doing a quotacheck.
+ *
+ * Given the inode and a dquot id, this updates both the incore dquot as well
+ * as the buffer copy. This is so that once the quotacheck is done, we can
+ * just log all the buffers, as opposed to logging numerous updates to
+ * individual dquots.
+ */
+STATIC int
+xfs_qm_quotacheck_dqadjust(
+ struct xfs_inode *ip,
+ xfs_dqid_t id,
+ uint type,
+ xfs_qcnt_t nblks,
+ xfs_qcnt_t rtblks)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_dquot *dqp;
+ int error;
+
+ error = xfs_qm_dqget(mp, ip, id, type,
+ XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
+ if (error) {
+ /*
+ * Shouldn't be able to turn off quotas here.
+ */
+ ASSERT(error != ESRCH);
+ ASSERT(error != ENOENT);
+ return error;
+ }
+
+ trace_xfs_dqadjust(dqp);
+
+ /*
+ * Adjust the inode count and the block count to reflect this inode's
+ * resource usage.
+ */
+ be64_add_cpu(&dqp->q_core.d_icount, 1);
+ dqp->q_res_icount++;
+ if (nblks) {
+ be64_add_cpu(&dqp->q_core.d_bcount, nblks);
+ dqp->q_res_bcount += nblks;
+ }
+ if (rtblks) {
+ be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
+ dqp->q_res_rtbcount += rtblks;
+ }
+
+ /*
+ * Set default limits, adjust timers (since we changed usages)
+ *
+ * There are no timers for the default values set in the root dquot.
+ */ + if (dqp->q_core.d_id) { + xfs_qm_adjust_dqlimits(mp, &dqp->q_core); + xfs_qm_adjust_dqtimers(mp, &dqp->q_core); + } + + dqp->dq_flags |= XFS_DQ_DIRTY; + xfs_qm_dqput(dqp); + return 0; +} + +STATIC int +xfs_qm_get_rtblks( + xfs_inode_t *ip, + xfs_qcnt_t *O_rtblks) +{ + xfs_filblks_t rtblks; /* total rt blks */ + xfs_extnum_t idx; /* extent record index */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t nextents; /* number of extent entries */ + int error; + + ASSERT(XFS_IS_REALTIME_INODE(ip)); + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + if (!(ifp->if_flags & XFS_IFEXTENTS)) { + if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK))) + return error; + } + rtblks = 0; + nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); + for (idx = 0; idx < nextents; idx++) + rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx)); + *O_rtblks = (xfs_qcnt_t)rtblks; + return 0; +} + +/* + * callback routine supplied to bulkstat(). Given an inumber, find its + * dquots and update them to account for resources taken by that inode. + */ +/* ARGSUSED */ +STATIC int +xfs_qm_dqusage_adjust( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_ino_t ino, /* inode number to get data for */ + void __user *buffer, /* not used */ + int ubsize, /* not used */ + int *ubused, /* not used */ + int *res) /* result code value */ +{ + xfs_inode_t *ip; + xfs_qcnt_t nblks, rtblks = 0; + int error; + + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * rootino must have its resources accounted for, not so with the quota + * inodes. + */ + if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { + *res = BULKSTAT_RV_NOTHING; + return XFS_ERROR(EINVAL); + } + + /* + * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget + * interface expects the inode to be exclusively locked because that's + * the case in all other instances. It's OK that we do this because + * quotacheck is done only at mount time. + */ + error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip); + if (error) { + *res = BULKSTAT_RV_NOTHING; + return error; + } + + ASSERT(ip->i_delayed_blks == 0); + + if (XFS_IS_REALTIME_INODE(ip)) { + /* + * Walk thru the extent list and count the realtime blocks. + */ + error = xfs_qm_get_rtblks(ip, &rtblks); + if (error) + goto error0; + } + + nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks; + + /* + * Add the (disk blocks and inode) resources occupied by this + * inode to its dquots. We do this adjustment in the incore dquot, + * and also copy the changes to its buffer. + * We don't care about putting these changes in a transaction + * envelope because if we crash in the middle of a 'quotacheck' + * we have to start from the beginning anyway. + * Once we're done, we'll log all the dquot bufs. + * + * The *QUOTA_ON checks below may look pretty racy, but quotachecks + * and quotaoffs don't race. (Quotachecks happen at mount time only). 
+ */ + if (XFS_IS_UQUOTA_ON(mp)) { + error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid, + XFS_DQ_USER, nblks, rtblks); + if (error) + goto error0; + } + + if (XFS_IS_GQUOTA_ON(mp)) { + error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid, + XFS_DQ_GROUP, nblks, rtblks); + if (error) + goto error0; + } + + if (XFS_IS_PQUOTA_ON(mp)) { + error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip), + XFS_DQ_PROJ, nblks, rtblks); + if (error) + goto error0; + } + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + IRELE(ip); + *res = BULKSTAT_RV_DIDONE; + return 0; + +error0: + xfs_iunlock(ip, XFS_ILOCK_EXCL); + IRELE(ip); + *res = BULKSTAT_RV_GIVEUP; + return error; +} + +/* + * Walk thru all the filesystem inodes and construct a consistent view + * of the disk quota world. If the quotacheck fails, disable quotas. + */ +int +xfs_qm_quotacheck( + xfs_mount_t *mp) +{ + int done, count, error; + xfs_ino_t lastino; + size_t structsz; + xfs_inode_t *uip, *gip; + uint flags; + + count = INT_MAX; + structsz = 1; + lastino = 0; + flags = 0; + + ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip); + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + /* + * There should be no cached dquots. The (simplistic) quotacheck + * algorithm doesn't like that. + */ + ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); + + xfs_notice(mp, "Quotacheck needed: Please wait."); + + /* + * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset + * their counters to zero. We need a clean slate. + * We don't log our changes till later. + */ + uip = mp->m_quotainfo->qi_uquotaip; + if (uip) { + error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA); + if (error) + goto error_return; + flags |= XFS_UQUOTA_CHKD; + } + + gip = mp->m_quotainfo->qi_gquotaip; + if (gip) { + error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ? + XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); + if (error) + goto error_return; + flags |= XFS_OQUOTA_CHKD; + } + + do { + /* + * Iterate thru all the inodes in the file system, + * adjusting the corresponding dquot counters in core. + */ + error = xfs_bulkstat(mp, &lastino, &count, + xfs_qm_dqusage_adjust, + structsz, NULL, &done); + if (error) + break; + + } while (!done); + + /* + * We've made all the changes that we need to make incore. + * Flush them down to disk buffers if everything was updated + * successfully. + */ + if (!error) + error = xfs_qm_dqflush_all(mp, 0); + + /* + * We can get this error if we couldn't do a dquot allocation inside + * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the + * dirty dquots that might be cached, we just want to get rid of them + * and turn quotaoff. The dquots won't be attached to any of the inodes + * at this point (because we intentionally didn't in dqget_noattach). + */ + if (error) { + xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); + goto error_return; + } + + /* + * We didn't log anything, because if we crashed, we'll have to + * start the quotacheck from scratch anyway. However, we must make + * sure that our dquot changes are secure before we put the + * quotacheck'd stamp on the superblock. So, here we do a synchronous + * flush. + */ + XFS_bflush(mp->m_ddev_targp); + + /* + * If one type of quotas is off, then it will lose its + * quotachecked status, since we won't be doing accounting for + * that type anymore. + */ + mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); + mp->m_qflags |= flags; + + error_return: + if (error) { + xfs_warn(mp, + "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", + error); + /* + * We must turn off quotas. 
+ */ + ASSERT(mp->m_quotainfo != NULL); + ASSERT(xfs_Gqm != NULL); + xfs_qm_destroy_quotainfo(mp); + if (xfs_mount_reset_sbqflags(mp)) { + xfs_warn(mp, + "Quotacheck: Failed to reset quota flags."); + } + } else + xfs_notice(mp, "Quotacheck: Done."); + return (error); +} + +/* + * This is called after the superblock has been read in and we're ready to + * iget the quota inodes. + */ +STATIC int +xfs_qm_init_quotainos( + xfs_mount_t *mp) +{ + xfs_inode_t *uip, *gip; + int error; + __int64_t sbflags; + uint flags; + + ASSERT(mp->m_quotainfo); + uip = gip = NULL; + sbflags = 0; + flags = 0; + + /* + * Get the uquota and gquota inodes + */ + if (xfs_sb_version_hasquota(&mp->m_sb)) { + if (XFS_IS_UQUOTA_ON(mp) && + mp->m_sb.sb_uquotino != NULLFSINO) { + ASSERT(mp->m_sb.sb_uquotino > 0); + if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, + 0, 0, &uip))) + return XFS_ERROR(error); + } + if (XFS_IS_OQUOTA_ON(mp) && + mp->m_sb.sb_gquotino != NULLFSINO) { + ASSERT(mp->m_sb.sb_gquotino > 0); + if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, + 0, 0, &gip))) { + if (uip) + IRELE(uip); + return XFS_ERROR(error); + } + } + } else { + flags |= XFS_QMOPT_SBVERSION; + sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | + XFS_SB_GQUOTINO | XFS_SB_QFLAGS); + } + + /* + * Create the two inodes, if they don't exist already. The changes + * made above will get added to a transaction and logged in one of + * the qino_alloc calls below. If the device is readonly, + * temporarily switch to read-write to do this. + */ + if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { + if ((error = xfs_qm_qino_alloc(mp, &uip, + sbflags | XFS_SB_UQUOTINO, + flags | XFS_QMOPT_UQUOTA))) + return XFS_ERROR(error); + + flags &= ~XFS_QMOPT_SBVERSION; + } + if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) { + flags |= (XFS_IS_GQUOTA_ON(mp) ? + XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA); + error = xfs_qm_qino_alloc(mp, &gip, + sbflags | XFS_SB_GQUOTINO, flags); + if (error) { + if (uip) + IRELE(uip); + + return XFS_ERROR(error); + } + } + + mp->m_quotainfo->qi_uquotaip = uip; + mp->m_quotainfo->qi_gquotaip = gip; + + return 0; +} + + + +/* + * Just pop the least recently used dquot off the freelist and + * recycle it. The returned dquot is locked. + */ +STATIC xfs_dquot_t * +xfs_qm_dqreclaim_one(void) +{ + xfs_dquot_t *dqpout; + xfs_dquot_t *dqp; + int restarts; + int startagain; + + restarts = 0; + dqpout = NULL; + + /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ +again: + startagain = 0; + mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); + + list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { + struct xfs_mount *mp = dqp->q_mount; + xfs_dqlock(dqp); + + /* + * We are racing with dqlookup here. Naturally we don't + * want to reclaim a dquot that lookup wants. We release the + * freelist lock and start over, so that lookup will grab + * both the dquot and the freelistlock. + */ + if (dqp->dq_flags & XFS_DQ_WANT) { + ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); + + trace_xfs_dqreclaim_want(dqp); + XQM_STATS_INC(xqmstats.xs_qm_dqwants); + restarts++; + startagain = 1; + goto dqunlock; + } + + /* + * If the dquot is inactive, we are assured that it is + * not on the mplist or the hashlist, and that makes our + * life easier. + */ + if (dqp->dq_flags & XFS_DQ_INACTIVE) { + ASSERT(mp == NULL); + ASSERT(! 
XFS_DQ_IS_DIRTY(dqp)); + ASSERT(list_empty(&dqp->q_hashlist)); + ASSERT(list_empty(&dqp->q_mplist)); + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + dqpout = dqp; + XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); + goto dqunlock; + } + + ASSERT(dqp->q_hash); + ASSERT(!list_empty(&dqp->q_mplist)); + + /* + * Try to grab the flush lock. If this dquot is in the process + * of getting flushed to disk, we don't want to reclaim it. + */ + if (!xfs_dqflock_nowait(dqp)) + goto dqunlock; + + /* + * We have the flush lock so we know that this is not in the + * process of being flushed. So, if this is dirty, flush it + * DELWRI so that we don't get a freelist infested with + * dirty dquots. + */ + if (XFS_DQ_IS_DIRTY(dqp)) { + int error; + + trace_xfs_dqreclaim_dirty(dqp); + + /* + * We flush it delayed write, so don't bother + * releasing the freelist lock. + */ + error = xfs_qm_dqflush(dqp, 0); + if (error) { + xfs_warn(mp, "%s: dquot %p flush failed", + __func__, dqp); + } + goto dqunlock; + } + + /* + * We're trying to get the hashlock out of order. This races + * with dqlookup; so, we giveup and goto the next dquot if + * we couldn't get the hashlock. This way, we won't starve + * a dqlookup process that holds the hashlock that is + * waiting for the freelist lock. + */ + if (!mutex_trylock(&dqp->q_hash->qh_lock)) { + restarts++; + goto dqfunlock; + } + + /* + * This races with dquot allocation code as well as dqflush_all + * and reclaim code. So, if we failed to grab the mplist lock, + * giveup everything and start over. + */ + if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) { + restarts++; + startagain = 1; + goto qhunlock; + } + + ASSERT(dqp->q_nrefs == 0); + list_del_init(&dqp->q_mplist); + mp->m_quotainfo->qi_dquots--; + mp->m_quotainfo->qi_dqreclaims++; + list_del_init(&dqp->q_hashlist); + dqp->q_hash->qh_version++; + list_del_init(&dqp->q_freelist); + xfs_Gqm->qm_dqfrlist_cnt--; + dqpout = dqp; + mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); +qhunlock: + mutex_unlock(&dqp->q_hash->qh_lock); +dqfunlock: + xfs_dqfunlock(dqp); +dqunlock: + xfs_dqunlock(dqp); + if (dqpout) + break; + if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) + break; + if (startagain) { + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + goto again; + } + } + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + return dqpout; +} + +/* + * Traverse the freelist of dquots and attempt to reclaim a maximum of + * 'howmany' dquots. This operation races with dqlookup(), and attempts to + * favor the lookup function ... + */ +STATIC int +xfs_qm_shake_freelist( + int howmany) +{ + int nreclaimed = 0; + xfs_dquot_t *dqp; + + if (howmany <= 0) + return 0; + + while (nreclaimed < howmany) { + dqp = xfs_qm_dqreclaim_one(); + if (!dqp) + return nreclaimed; + xfs_qm_dqdestroy(dqp); + nreclaimed++; + } + return nreclaimed; +} + +/* + * The kmem_shake interface is invoked when memory is running low. 
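+ * Reclaim only proceeds once the freelist has outgrown either the
+ * number of dquots attached to inodes or the global ndquot
+ * threshold; the stated goal is qm_dqfree_ratio free dquots per
+ * dquot in use (XFS_QM_DQFREE_RATIO, i.e. 2, by default).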
+ */ +/* ARGSUSED */ +STATIC int +xfs_qm_shake( + struct shrinker *shrink, + struct shrink_control *sc) +{ + int ndqused, nfree, n; + gfp_t gfp_mask = sc->gfp_mask; + + if (!kmem_shake_allow(gfp_mask)) + return 0; + if (!xfs_Gqm) + return 0; + + nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */ + /* incore dquots in all f/s's */ + ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree; + + ASSERT(ndqused >= 0); + + if (nfree <= ndqused && nfree < ndquot) + return 0; + + ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */ + n = nfree - ndqused - ndquot; /* # over target */ + + return xfs_qm_shake_freelist(MAX(nfree, n)); +} + + +/*------------------------------------------------------------------*/ + +/* + * Return a new incore dquot. Depending on the number of + * dquots in the system, we either allocate a new one on the kernel heap, + * or reclaim a free one. + * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed + * to reclaim an existing one from the freelist. + */ +boolean_t +xfs_qm_dqalloc_incore( + xfs_dquot_t **O_dqpp) +{ + xfs_dquot_t *dqp; + + /* + * Check against high water mark to see if we want to pop + * a nincompoop dquot off the freelist. + */ + if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) { + /* + * Try to recycle a dquot from the freelist. + */ + if ((dqp = xfs_qm_dqreclaim_one())) { + XQM_STATS_INC(xqmstats.xs_qm_dqreclaims); + /* + * Just zero the core here. The rest will get + * reinitialized by caller. XXX we shouldn't even + * do this zero ... + */ + memset(&dqp->q_core, 0, sizeof(dqp->q_core)); + *O_dqpp = dqp; + return B_FALSE; + } + XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses); + } + + /* + * Allocate a brand new dquot on the kernel heap and return it + * to the caller to initialize. + */ + ASSERT(xfs_Gqm->qm_dqzone != NULL); + *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP); + atomic_inc(&xfs_Gqm->qm_totaldquots); + + return B_TRUE; +} + + +/* + * Start a transaction and write the incore superblock changes to + * disk. flags parameter indicates which fields have changed. + */ +int +xfs_qm_write_sb_changes( + xfs_mount_t *mp, + __int64_t flags) +{ + xfs_trans_t *tp; + int error; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); + if ((error = xfs_trans_reserve(tp, 0, + mp->m_sb.sb_sectsize + 128, 0, + 0, + XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_mod_sb(tp, flags); + error = xfs_trans_commit(tp, 0); + + return error; +} + + +/* --------------- utility functions for vnodeops ---------------- */ + + +/* + * Given an inode, a uid, gid and prid make sure that we have + * allocated relevant dquot(s) on disk, and that we won't exceed inode + * quotas by creating this file. + * This also attaches dquot(s) to the given inode after locking it, + * and returns the dquots corresponding to the uid and/or gid. + * + * in : inode (unlocked) + * out : udquot, gdquot with references taken and unlocked + */ +int +xfs_qm_vop_dqalloc( + struct xfs_inode *ip, + uid_t uid, + gid_t gid, + prid_t prid, + uint flags, + struct xfs_dquot **O_udqpp, + struct xfs_dquot **O_gdqpp) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_dquot *uq, *gq; + int error; + uint lockflags; + + if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) + return 0; + + lockflags = XFS_ILOCK_EXCL; + xfs_ilock(ip, lockflags); + + if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip)) + gid = ip->i_d.di_gid; + + /* + * Attach the dquot(s) to this inode, doing a dquot allocation + * if necessary. 
The dquot(s) will not be locked. + */ + if (XFS_NOT_DQATTACHED(mp, ip)) { + error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC); + if (error) { + xfs_iunlock(ip, lockflags); + return error; + } + } + + uq = gq = NULL; + if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) { + if (ip->i_d.di_uid != uid) { + /* + * What we need is the dquot that has this uid, and + * if we send the inode to dqget, the uid of the inode + * takes priority over what's sent in the uid argument. + * We must unlock inode here before calling dqget if + * we're not sending the inode, because otherwise + * we'll deadlock by doing trans_reserve while + * holding ilock. + */ + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid, + XFS_DQ_USER, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &uq))) { + ASSERT(error != ENOENT); + return error; + } + /* + * Get the ilock in the right order. + */ + xfs_dqunlock(uq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + /* + * Take an extra reference, because we'll return + * this to caller + */ + ASSERT(ip->i_udquot); + uq = ip->i_udquot; + xfs_dqlock(uq); + XFS_DQHOLD(uq); + xfs_dqunlock(uq); + } + } + if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) { + if (ip->i_d.di_gid != gid) { + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid, + XFS_DQ_GROUP, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &gq))) { + if (uq) + xfs_qm_dqrele(uq); + ASSERT(error != ENOENT); + return error; + } + xfs_dqunlock(gq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + ASSERT(ip->i_gdquot); + gq = ip->i_gdquot; + xfs_dqlock(gq); + XFS_DQHOLD(gq); + xfs_dqunlock(gq); + } + } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { + if (xfs_get_projid(ip) != prid) { + xfs_iunlock(ip, lockflags); + if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid, + XFS_DQ_PROJ, + XFS_QMOPT_DQALLOC | + XFS_QMOPT_DOWARN, + &gq))) { + if (uq) + xfs_qm_dqrele(uq); + ASSERT(error != ENOENT); + return (error); + } + xfs_dqunlock(gq); + lockflags = XFS_ILOCK_SHARED; + xfs_ilock(ip, lockflags); + } else { + ASSERT(ip->i_gdquot); + gq = ip->i_gdquot; + xfs_dqlock(gq); + XFS_DQHOLD(gq); + xfs_dqunlock(gq); + } + } + if (uq) + trace_xfs_dquot_dqalloc(ip); + + xfs_iunlock(ip, lockflags); + if (O_udqpp) + *O_udqpp = uq; + else if (uq) + xfs_qm_dqrele(uq); + if (O_gdqpp) + *O_gdqpp = gq; + else if (gq) + xfs_qm_dqrele(gq); + return 0; +} + +/* + * Actually transfer ownership, and do dquot modifications. + * These were already reserved. + */ +xfs_dquot_t * +xfs_qm_vop_chown( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t **IO_olddq, + xfs_dquot_t *newdq) +{ + xfs_dquot_t *prevdq; + uint bfield = XFS_IS_REALTIME_INODE(ip) ? + XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; + + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); + + /* old dquot */ + prevdq = *IO_olddq; + ASSERT(prevdq); + ASSERT(prevdq != newdq); + + xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks)); + xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1); + + /* the sparkling new dquot */ + xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks); + xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1); + + /* + * Take an extra reference, because the inode + * is going to keep this dquot pointer even + * after the trans_commit. 
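+ * (XFS_DQHOLD() is simply a q_nrefs++ made under the dquot lock,
+ * hence the lock/hold/unlock pairing below.)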
+ */ + xfs_dqlock(newdq); + XFS_DQHOLD(newdq); + xfs_dqunlock(newdq); + *IO_olddq = newdq; + + return prevdq; +} + +/* + * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID). + */ +int +xfs_qm_vop_chown_reserve( + xfs_trans_t *tp, + xfs_inode_t *ip, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp, + uint flags) +{ + xfs_mount_t *mp = ip->i_mount; + uint delblks, blkflags, prjflags = 0; + xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; + int error; + + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + delblks = ip->i_delayed_blks; + delblksudq = delblksgdq = unresudq = unresgdq = NULL; + blkflags = XFS_IS_REALTIME_INODE(ip) ? + XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; + + if (XFS_IS_UQUOTA_ON(mp) && udqp && + ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) { + delblksudq = udqp; + /* + * If there are delayed allocation blocks, then we have to + * unreserve those from the old dquot, and add them to the + * new dquot. + */ + if (delblks) { + ASSERT(ip->i_udquot); + unresudq = ip->i_udquot; + } + } + if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) { + if (XFS_IS_PQUOTA_ON(ip->i_mount) && + xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id)) + prjflags = XFS_QMOPT_ENOSPC; + + if (prjflags || + (XFS_IS_GQUOTA_ON(ip->i_mount) && + ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) { + delblksgdq = gdqp; + if (delblks) { + ASSERT(ip->i_gdquot); + unresgdq = ip->i_gdquot; + } + } + } + + if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount, + delblksudq, delblksgdq, ip->i_d.di_nblocks, 1, + flags | blkflags | prjflags))) + return (error); + + /* + * Do the delayed blks reservations/unreservations now. Since, these + * are done without the help of a transaction, if a reservation fails + * its previous reservations won't be automatically undone by trans + * code. So, we have to do it manually here. + */ + if (delblks) { + /* + * Do the reservations first. Unreservation can't fail. + */ + ASSERT(delblksudq || delblksgdq); + ASSERT(unresudq || unresgdq); + if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, + delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0, + flags | blkflags | prjflags))) + return (error); + xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount, + unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0, + blkflags); + } + + return (0); +} + +int +xfs_qm_vop_rename_dqattach( + struct xfs_inode **i_tab) +{ + struct xfs_mount *mp = i_tab[0]->i_mount; + int i; + + if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) + return 0; + + for (i = 0; (i < 4 && i_tab[i]); i++) { + struct xfs_inode *ip = i_tab[i]; + int error; + + /* + * Watch out for duplicate entries in the table. 
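+	 * e.g. a rename within a single directory passes the same
+	 * parent inode twice:
+	 *
+	 *	i_tab[] = { dp, dp, src_ip, target_ip-or-NULL }
+	 *
+	 * and the adjacent-duplicate check below keeps us from doing
+	 * a second dqattach on dp.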
+ */ + if (i == 0 || ip != i_tab[i-1]) { + if (XFS_NOT_DQATTACHED(mp, ip)) { + error = xfs_qm_dqattach(ip, 0); + if (error) + return error; + } + } + } + return 0; +} + +void +xfs_qm_vop_create_dqattach( + struct xfs_trans *tp, + struct xfs_inode *ip, + struct xfs_dquot *udqp, + struct xfs_dquot *gdqp) +{ + struct xfs_mount *mp = tp->t_mountp; + + if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) + return; + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); + + if (udqp) { + xfs_dqlock(udqp); + XFS_DQHOLD(udqp); + xfs_dqunlock(udqp); + ASSERT(ip->i_udquot == NULL); + ip->i_udquot = udqp; + ASSERT(XFS_IS_UQUOTA_ON(mp)); + ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id)); + xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); + } + if (gdqp) { + xfs_dqlock(gdqp); + XFS_DQHOLD(gdqp); + xfs_dqunlock(gdqp); + ASSERT(ip->i_gdquot == NULL); + ip->i_gdquot = gdqp; + ASSERT(XFS_IS_OQUOTA_ON(mp)); + ASSERT((XFS_IS_GQUOTA_ON(mp) ? + ip->i_d.di_gid : xfs_get_projid(ip)) == + be32_to_cpu(gdqp->q_core.d_id)); + xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); + } +} + diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h new file mode 100644 index 0000000..43b9abe --- /dev/null +++ b/fs/xfs/xfs_qm.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_QM_H__ +#define __XFS_QM_H__ + +#include "xfs_dquot_item.h" +#include "xfs_dquot.h" +#include "xfs_quota_priv.h" +#include "xfs_qm_stats.h" + +struct xfs_qm; +struct xfs_inode; + +extern uint ndquot; +extern struct mutex xfs_Gqm_lock; +extern struct xfs_qm *xfs_Gqm; +extern kmem_zone_t *qm_dqzone; +extern kmem_zone_t *qm_dqtrxzone; + +/* + * Used in xfs_qm_sync called by xfs_sync to count the max times that it can + * iterate over the mountpt's dquot list in one call. + */ +#define XFS_QM_SYNC_MAX_RESTARTS 7 + +/* + * Ditto, for xfs_qm_dqreclaim_one. + */ +#define XFS_QM_RECLAIM_MAX_RESTARTS 4 + +/* + * Ideal ratio of free to in use dquots. Quota manager makes an attempt + * to keep this balance. + */ +#define XFS_QM_DQFREE_RATIO 2 + +/* + * Dquot hashtable constants/threshold values. + */ +#define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) +#define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t)) + +/* + * This defines the unit of allocation of dquots. + * Currently, it is just one file system block, and a 4K blk contains 30 + * (136 * 30 = 4080) dquots. It's probably not worth trying to make + * this more dynamic. + * XXXsup However, if this number is changed, we have to make sure that we don't + * implicitly assume that we do allocations in chunks of a single filesystem + * block in the dquot/xqm code. + */ +#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 + +typedef xfs_dqhash_t xfs_dqlist_t; + +/* + * Quota Manager (global) structure. Lives only in core. 
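+ * There is a single instance of this structure, shared by every
+ * mounted filesystem that has quota enabled; it hangs off the
+ * xfs_Gqm pointer declared above, and qm_nrefs counts the mounts
+ * currently using it.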
+ */ +typedef struct xfs_qm { + xfs_dqlist_t *qm_usr_dqhtable;/* udquot hash table */ + xfs_dqlist_t *qm_grp_dqhtable;/* gdquot hash table */ + uint qm_dqhashmask; /* # buckets in dq hashtab - 1 */ + struct list_head qm_dqfrlist; /* freelist of dquots */ + struct mutex qm_dqfrlist_lock; + int qm_dqfrlist_cnt; + atomic_t qm_totaldquots; /* total incore dquots */ + uint qm_nrefs; /* file systems with quota on */ + int qm_dqfree_ratio;/* ratio of free to inuse dquots */ + kmem_zone_t *qm_dqzone; /* dquot mem-alloc zone */ + kmem_zone_t *qm_dqtrxzone; /* t_dqinfo of transactions */ +} xfs_qm_t; + +/* + * Various quota information for individual filesystems. + * The mount structure keeps a pointer to this. + */ +typedef struct xfs_quotainfo { + xfs_inode_t *qi_uquotaip; /* user quota inode */ + xfs_inode_t *qi_gquotaip; /* group quota inode */ + struct list_head qi_dqlist; /* all dquots in filesys */ + struct mutex qi_dqlist_lock; + int qi_dquots; + int qi_dqreclaims; /* a change here indicates + a removal in the dqlist */ + time_t qi_btimelimit; /* limit for blks timer */ + time_t qi_itimelimit; /* limit for inodes timer */ + time_t qi_rtbtimelimit;/* limit for rt blks timer */ + xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */ + xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */ + xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */ + struct mutex qi_quotaofflock;/* to serialize quotaoff */ + xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */ + uint qi_dqperchunk; /* # ondisk dqs in above chunk */ + xfs_qcnt_t qi_bhardlimit; /* default data blk hard limit */ + xfs_qcnt_t qi_bsoftlimit; /* default data blk soft limit */ + xfs_qcnt_t qi_ihardlimit; /* default inode count hard limit */ + xfs_qcnt_t qi_isoftlimit; /* default inode count soft limit */ + xfs_qcnt_t qi_rtbhardlimit;/* default realtime blk hard limit */ + xfs_qcnt_t qi_rtbsoftlimit;/* default realtime blk soft limit */ +} xfs_quotainfo_t; + + +extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long); +extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *, + xfs_dquot_t *, xfs_dquot_t *, long, long, uint); +extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *); +extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *); + +/* + * We keep the usr and grp dquots separately so that locking will be easier + * to do at commit time. All transactions that we know of at this point + * affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value. + */ +#define XFS_QM_TRANS_MAXDQS 2 +typedef struct xfs_dquot_acct { + xfs_dqtrx_t dqa_usrdquots[XFS_QM_TRANS_MAXDQS]; + xfs_dqtrx_t dqa_grpdquots[XFS_QM_TRANS_MAXDQS]; +} xfs_dquot_acct_t; + +/* + * Users are allowed to have a usage exceeding their softlimit for + * a period this long. 
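+ * The timer values below are in seconds: 7 * 24*60*60 = 604800,
+ * i.e. one week.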
+ */ +#define XFS_QM_BTIMELIMIT (7 * 24*60*60) /* 1 week */ +#define XFS_QM_RTBTIMELIMIT (7 * 24*60*60) /* 1 week */ +#define XFS_QM_ITIMELIMIT (7 * 24*60*60) /* 1 week */ + +#define XFS_QM_BWARNLIMIT 5 +#define XFS_QM_IWARNLIMIT 5 +#define XFS_QM_RTBWARNLIMIT 5 + +extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); +extern int xfs_qm_quotacheck(xfs_mount_t *); +extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); + +/* dquot stuff */ +extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **); +extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint); +extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint); + +/* quota ops */ +extern int xfs_qm_scall_trunc_qfiles(xfs_mount_t *, uint); +extern int xfs_qm_scall_getquota(xfs_mount_t *, xfs_dqid_t, uint, + fs_disk_quota_t *); +extern int xfs_qm_scall_setqlim(xfs_mount_t *, xfs_dqid_t, uint, + fs_disk_quota_t *); +extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); +extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); +extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); + +#endif /* __XFS_QM_H__ */ diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c new file mode 100644 index 0000000..a0a829a --- /dev/null +++ b/fs/xfs/xfs_qm_bhv.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_itable.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_qm.h" + + +STATIC void +xfs_fill_statvfs_from_dquot( + struct kstatfs *statp, + xfs_disk_dquot_t *dp) +{ + __uint64_t limit; + + limit = dp->d_blk_softlimit ? + be64_to_cpu(dp->d_blk_softlimit) : + be64_to_cpu(dp->d_blk_hardlimit); + if (limit && statp->f_blocks > limit) { + statp->f_blocks = limit; + statp->f_bfree = statp->f_bavail = + (statp->f_blocks > be64_to_cpu(dp->d_bcount)) ? + (statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0; + } + + limit = dp->d_ino_softlimit ? + be64_to_cpu(dp->d_ino_softlimit) : + be64_to_cpu(dp->d_ino_hardlimit); + if (limit && statp->f_files > limit) { + statp->f_files = limit; + statp->f_ffree = + (statp->f_files > be64_to_cpu(dp->d_icount)) ? + (statp->f_ffree - be64_to_cpu(dp->d_icount)) : 0; + } +} + + +/* + * Directory tree accounting is implemented using project quotas, where + * the project identifier is inherited from parent directories. + * A statvfs (df, etc.) of a directory that is using project quota should + * return a statvfs of the project, not the entire filesystem. + * This makes such trees appear as if they are filesystems in themselves. 
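+ * e.g. for a tree whose project carries a block soft limit, df
+ * reports f_blocks as that limit and f_bfree as whatever part of
+ * it the project has not yet consumed, per
+ * xfs_fill_statvfs_from_dquot() above.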
+ */ +void +xfs_qm_statvfs( + xfs_inode_t *ip, + struct kstatfs *statp) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_dquot_t *dqp; + + if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) { + xfs_fill_statvfs_from_dquot(statp, &dqp->q_core); + xfs_qm_dqput(dqp); + } +} + +int +xfs_qm_newmount( + xfs_mount_t *mp, + uint *needquotamount, + uint *quotaflags) +{ + uint quotaondisk; + uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; + + quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && + (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); + + if (quotaondisk) { + uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT; + pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT; + gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT; + } + + /* + * If the device itself is read-only, we can't allow + * the user to change the state of quota on the mount - + * this would generate a transaction on the ro device, + * which would lead to an I/O error and shutdown + */ + + if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) || + (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) || + (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) || + (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) || + (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || + (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) && + xfs_dev_is_read_only(mp, "changing quota state")) { + xfs_warn(mp, "please mount with%s%s%s%s.", + (!quotaondisk ? "out quota" : ""), + (uquotaondisk ? " usrquota" : ""), + (pquotaondisk ? " prjquota" : ""), + (gquotaondisk ? " grpquota" : "")); + return XFS_ERROR(EPERM); + } + + if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { + /* + * Call mount_quotas at this point only if we won't have to do + * a quotacheck. + */ + if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { + /* + * If an error occurred, qm_mount_quotas code + * has already disabled quotas. So, just finish + * mounting, and get on with the boring life + * without disk quotas. + */ + xfs_qm_mount_quotas(mp); + } else { + /* + * Clear the quota flags, but remember them. This + * is so that the quota code doesn't get invoked + * before we're ready. This can happen when an + * inode goes inactive and wants to free blocks, + * or via xfs_log_mount_finish. + */ + *needquotamount = B_TRUE; + *quotaflags = mp->m_qflags; + mp->m_qflags = 0; + } + } + + return 0; +} + +void __init +xfs_qm_init(void) +{ + printk(KERN_INFO "SGI XFS Quota Management subsystem\n"); + mutex_init(&xfs_Gqm_lock); + xfs_qm_init_procfs(); +} + +void __exit +xfs_qm_exit(void) +{ + xfs_qm_cleanup_procfs(); + if (qm_dqzone) + kmem_zone_destroy(qm_dqzone); + if (qm_dqtrxzone) + kmem_zone_destroy(qm_dqtrxzone); +} diff --git a/fs/xfs/xfs_qm_stats.c b/fs/xfs/xfs_qm_stats.c new file mode 100644 index 0000000..8671a0b --- /dev/null +++ b/fs/xfs/xfs_qm_stats.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_itable.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_qm.h" + +struct xqmstats xqmstats; + +static int xqm_proc_show(struct seq_file *m, void *v) +{ + /* maximum; incore; ratio free to inuse; freelist */ + seq_printf(m, "%d\t%d\t%d\t%u\n", + ndquot, + xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0, + xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0, + xfs_Gqm? xfs_Gqm->qm_dqfrlist_cnt : 0); + return 0; +} + +static int xqm_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, xqm_proc_show, NULL); +} + +static const struct file_operations xqm_proc_fops = { + .owner = THIS_MODULE, + .open = xqm_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int xqmstat_proc_show(struct seq_file *m, void *v) +{ + /* quota performance statistics */ + seq_printf(m, "qm %u %u %u %u %u %u %u %u\n", + xqmstats.xs_qm_dqreclaims, + xqmstats.xs_qm_dqreclaim_misses, + xqmstats.xs_qm_dquot_dups, + xqmstats.xs_qm_dqcachemisses, + xqmstats.xs_qm_dqcachehits, + xqmstats.xs_qm_dqwants, + xqmstats.xs_qm_dqshake_reclaims, + xqmstats.xs_qm_dqinact_reclaims); + return 0; +} + +static int xqmstat_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, xqmstat_proc_show, NULL); +} + +static const struct file_operations xqmstat_proc_fops = { + .owner = THIS_MODULE, + .open = xqmstat_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void +xfs_qm_init_procfs(void) +{ + proc_create("fs/xfs/xqmstat", 0, NULL, &xqmstat_proc_fops); + proc_create("fs/xfs/xqm", 0, NULL, &xqm_proc_fops); +} + +void +xfs_qm_cleanup_procfs(void) +{ + remove_proc_entry("fs/xfs/xqm", NULL); + remove_proc_entry("fs/xfs/xqmstat", NULL); +} diff --git a/fs/xfs/xfs_qm_stats.h b/fs/xfs/xfs_qm_stats.h new file mode 100644 index 0000000..5b964fc --- /dev/null +++ b/fs/xfs/xfs_qm_stats.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2002 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_QM_STATS_H__ +#define __XFS_QM_STATS_H__ + +#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF) + +/* + * XQM global statistics + */ +struct xqmstats { + __uint32_t xs_qm_dqreclaims; + __uint32_t xs_qm_dqreclaim_misses; + __uint32_t xs_qm_dquot_dups; + __uint32_t xs_qm_dqcachemisses; + __uint32_t xs_qm_dqcachehits; + __uint32_t xs_qm_dqwants; + __uint32_t xs_qm_dqshake_reclaims; + __uint32_t xs_qm_dqinact_reclaims; +}; + +extern struct xqmstats xqmstats; + +# define XQM_STATS_INC(count) ( (count)++ ) + +extern void xfs_qm_init_procfs(void); +extern void xfs_qm_cleanup_procfs(void); + +#else + +# define XQM_STATS_INC(count) do { } while (0) + +static inline void xfs_qm_init_procfs(void) { }; +static inline void xfs_qm_cleanup_procfs(void) { }; + +#endif + +#endif /* __XFS_QM_STATS_H__ */ diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c new file mode 100644 index 0000000..609246f --- /dev/null +++ b/fs/xfs/xfs_qm_syscalls.c @@ -0,0 +1,906 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_alloc.h" +#include "xfs_quota.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_itable.h" +#include "xfs_bmap.h" +#include "xfs_rtalloc.h" +#include "xfs_error.h" +#include "xfs_attr.h" +#include "xfs_buf_item.h" +#include "xfs_utils.h" +#include "xfs_qm.h" +#include "xfs_trace.h" + +STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); +STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, + uint); +STATIC uint xfs_qm_export_flags(uint); +STATIC uint xfs_qm_export_qtype_flags(uint); +STATIC void xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *, + fs_disk_quota_t *); + + +/* + * Turn off quota accounting and/or enforcement for all udquots and/or + * gdquots. Called only at unmount time. + * + * This assumes that there are no dquots of this file system cached + * incore, and modifies the ondisk dquot directly. Therefore, for example, + * it is an error to call this twice, without purging the cache. + */ +int +xfs_qm_scall_quotaoff( + xfs_mount_t *mp, + uint flags) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + uint dqtype; + int error; + uint inactivate_flags; + xfs_qoff_logitem_t *qoffstart; + int nculprits; + + /* + * No file system can have quotas enabled on disk but not in core. + * Note that quota utilities (like quotaoff) _expect_ + * errno == EEXIST here. 
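+ * e.g. a second quotaoff on an already-off filesystem finds no
+ * matching m_qflags bits and gets EEXIST back, which the tools
+ * take to mean "already off".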
+ */ + if ((mp->m_qflags & flags) == 0) + return XFS_ERROR(EEXIST); + error = 0; + + flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); + + /* + * We don't want to deal with two quotaoffs messing up each other, + * so we're going to serialize it. quotaoff isn't exactly a performance + * critical thing. + * If quotaoff, then we must be dealing with the root filesystem. + */ + ASSERT(q); + mutex_lock(&q->qi_quotaofflock); + + /* + * If we're just turning off quota enforcement, change mp and go. + */ + if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { + mp->m_qflags &= ~(flags); + + spin_lock(&mp->m_sb_lock); + mp->m_sb.sb_qflags = mp->m_qflags; + spin_unlock(&mp->m_sb_lock); + mutex_unlock(&q->qi_quotaofflock); + + /* XXX what to do if error ? Revert back to old vals incore ? */ + error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); + return (error); + } + + dqtype = 0; + inactivate_flags = 0; + /* + * If accounting is off, we must turn enforcement off, clear the + * quota 'CHKD' certificate to make it known that we have to + * do a quotacheck the next time this quota is turned on. + */ + if (flags & XFS_UQUOTA_ACCT) { + dqtype |= XFS_QMOPT_UQUOTA; + flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); + inactivate_flags |= XFS_UQUOTA_ACTIVE; + } + if (flags & XFS_GQUOTA_ACCT) { + dqtype |= XFS_QMOPT_GQUOTA; + flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); + inactivate_flags |= XFS_GQUOTA_ACTIVE; + } else if (flags & XFS_PQUOTA_ACCT) { + dqtype |= XFS_QMOPT_PQUOTA; + flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); + inactivate_flags |= XFS_PQUOTA_ACTIVE; + } + + /* + * Nothing to do? Don't complain. This happens when we're just + * turning off quota enforcement. + */ + if ((mp->m_qflags & flags) == 0) + goto out_unlock; + + /* + * Write the LI_QUOTAOFF log record, and do SB changes atomically, + * and synchronously. If we fail to write, we should abort the + * operation as it cannot be recovered safely if we crash. + */ + error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); + if (error) + goto out_unlock; + + /* + * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct + * to take care of the race between dqget and quotaoff. We don't take + * any special locks to reset these bits. All processes need to check + * these bits *after* taking inode lock(s) to see if the particular + * quota type is in the process of being turned off. If *ACTIVE, it is + * guaranteed that all dquot structures and all quotainode ptrs will all + * stay valid as long as that inode is kept locked. + * + * There is no turning back after this. + */ + mp->m_qflags &= ~inactivate_flags; + + /* + * Give back all the dquot reference(s) held by inodes. + * Here we go thru every single incore inode in this file system, and + * do a dqrele on the i_udquot/i_gdquot that it may have. + * Essentially, as long as somebody has an inode locked, this guarantees + * that quotas will not be turned off. This is handy because in a + * transaction once we lock the inode(s) and check for quotaon, we can + * depend on the quota inodes (and other things) being valid as long as + * we keep the lock(s). + */ + xfs_qm_dqrele_all_inodes(mp, flags); + + /* + * Next we make the changes in the quota flag in the mount struct. + * This isn't protected by a particular lock directly, because we + * don't want to take a mrlock every time we depend on quotas being on. + */ + mp->m_qflags &= ~(flags); + + /* + * Go through all the dquots of this file system and purge them, + * according to what was turned off. 
We may not be able to get rid + * of all dquots, because dquots can have temporary references that + * are not attached to inodes. eg. xfs_setattr, xfs_create. + * So, if we couldn't purge all the dquots from the filesystem, + * we can't get rid of the incore data structures. + */ + while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype))) + delay(10 * nculprits); + + /* + * Transactions that had started before ACTIVE state bit was cleared + * could have logged many dquots, so they'd have higher LSNs than + * the first QUOTAOFF log record does. If we happen to crash when + * the tail of the log has gone past the QUOTAOFF record, but + * before the last dquot modification, those dquots __will__ + * recover, and that's not good. + * + * So, we have QUOTAOFF start and end logitems; the start + * logitem won't get overwritten until the end logitem appears... + */ + error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags); + if (error) { + /* We're screwed now. Shutdown is the only option. */ + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + goto out_unlock; + } + + /* + * If quotas is completely disabled, close shop. + */ + if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) || + ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) { + mutex_unlock(&q->qi_quotaofflock); + xfs_qm_destroy_quotainfo(mp); + return (0); + } + + /* + * Release our quotainode references if we don't need them anymore. + */ + if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { + IRELE(q->qi_uquotaip); + q->qi_uquotaip = NULL; + } + if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) { + IRELE(q->qi_gquotaip); + q->qi_gquotaip = NULL; + } + +out_unlock: + mutex_unlock(&q->qi_quotaofflock); + return error; +} + +STATIC int +xfs_qm_scall_trunc_qfile( + struct xfs_mount *mp, + xfs_ino_t ino) +{ + struct xfs_inode *ip; + struct xfs_trans *tp; + int error; + + if (ino == NULLFSINO) + return 0; + + error = xfs_iget(mp, NULL, ino, 0, 0, &ip); + if (error) + return error; + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); + error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) { + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + goto out_put; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip); + + error = xfs_itruncate_data(&tp, ip, 0); + if (error) { + xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | + XFS_TRANS_ABORT); + goto out_unlock; + } + + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); + +out_unlock: + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); +out_put: + IRELE(ip); + return error; +} + +int +xfs_qm_scall_trunc_qfiles( + xfs_mount_t *mp, + uint flags) +{ + int error = 0, error2 = 0; + + if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { + xfs_debug(mp, "%s: flags=%x m_qflags=%x\n", + __func__, flags, mp->m_qflags); + return XFS_ERROR(EINVAL); + } + + if (flags & XFS_DQ_USER) + error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); + if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) + error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); + + return error ? error : error2; +} + +/* + * Switch on (a given) quota enforcement for a filesystem. This takes + * effect immediately. + * (Switching on quota accounting must be done at mount time.) 
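+ * e.g. a request for XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD has the
+ * accounting bit masked off below, leaving only user enforcement
+ * to be switched on; accounting itself can only be enabled via
+ * mount options.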
+ */ +int +xfs_qm_scall_quotaon( + xfs_mount_t *mp, + uint flags) +{ + int error; + uint qf; + __int64_t sbflags; + + flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); + /* + * Switching on quota accounting must be done at mount time. + */ + flags &= ~(XFS_ALL_QUOTA_ACCT); + + sbflags = 0; + + if (flags == 0) { + xfs_debug(mp, "%s: zero flags, m_qflags=%x\n", + __func__, mp->m_qflags); + return XFS_ERROR(EINVAL); + } + + /* No fs can turn on quotas with a delayed effect */ + ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); + + /* + * Can't enforce without accounting. We check the superblock + * qflags here instead of m_qflags because rootfs can have + * quota acct on ondisk without m_qflags' knowing. + */ + if (((flags & XFS_UQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && + (flags & XFS_UQUOTA_ENFD)) + || + ((flags & XFS_PQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && + (flags & XFS_GQUOTA_ACCT) == 0 && + (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && + (flags & XFS_OQUOTA_ENFD))) { + xfs_debug(mp, + "%s: Can't enforce without acct, flags=%x sbflags=%x\n", + __func__, flags, mp->m_sb.sb_qflags); + return XFS_ERROR(EINVAL); + } + /* + * If everything's up to-date incore, then don't waste time. + */ + if ((mp->m_qflags & flags) == flags) + return XFS_ERROR(EEXIST); + + /* + * Change sb_qflags on disk but not incore mp->qflags + * if this is the root filesystem. + */ + spin_lock(&mp->m_sb_lock); + qf = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = qf | flags; + spin_unlock(&mp->m_sb_lock); + + /* + * There's nothing to change if it's the same. + */ + if ((qf & flags) == flags && sbflags == 0) + return XFS_ERROR(EEXIST); + sbflags |= XFS_SB_QFLAGS; + + if ((error = xfs_qm_write_sb_changes(mp, sbflags))) + return (error); + /* + * If we aren't trying to switch on quota enforcement, we are done. + */ + if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != + (mp->m_qflags & XFS_UQUOTA_ACCT)) || + ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != + (mp->m_qflags & XFS_PQUOTA_ACCT)) || + ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != + (mp->m_qflags & XFS_GQUOTA_ACCT)) || + (flags & XFS_ALL_QUOTA_ENFD) == 0) + return (0); + + if (! XFS_IS_QUOTA_RUNNING(mp)) + return XFS_ERROR(ESRCH); + + /* + * Switch on quota enforcement in core. + */ + mutex_lock(&mp->m_quotainfo->qi_quotaofflock); + mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); + mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); + + return (0); +} + + +/* + * Return quota status information, such as uquota-off, enforcements, etc. 
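+ * This backs the Q_XGETQSTAT quotactl(2) command; a caller does
+ * roughly
+ *
+ *	struct fs_quota_stat qs;
+ *	quotactl(QCMD(Q_XGETQSTAT, USRQUOTA), special, 0, (void *)&qs);
+ *
+ * and reads the accounting/enforcement state out of qs.qs_flags.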
+ */ +int +xfs_qm_scall_getqstat( + struct xfs_mount *mp, + struct fs_quota_stat *out) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + struct xfs_inode *uip, *gip; + boolean_t tempuqip, tempgqip; + + uip = gip = NULL; + tempuqip = tempgqip = B_FALSE; + memset(out, 0, sizeof(fs_quota_stat_t)); + + out->qs_version = FS_QSTAT_VERSION; + if (!xfs_sb_version_hasquota(&mp->m_sb)) { + out->qs_uquota.qfs_ino = NULLFSINO; + out->qs_gquota.qfs_ino = NULLFSINO; + return (0); + } + out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & + (XFS_ALL_QUOTA_ACCT| + XFS_ALL_QUOTA_ENFD)); + out->qs_pad = 0; + out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; + out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; + + if (q) { + uip = q->qi_uquotaip; + gip = q->qi_gquotaip; + } + if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { + if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, + 0, 0, &uip) == 0) + tempuqip = B_TRUE; + } + if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { + if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, + 0, 0, &gip) == 0) + tempgqip = B_TRUE; + } + if (uip) { + out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; + out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; + if (tempuqip) + IRELE(uip); + } + if (gip) { + out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; + out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; + if (tempgqip) + IRELE(gip); + } + if (q) { + out->qs_incoredqs = q->qi_dquots; + out->qs_btimelimit = q->qi_btimelimit; + out->qs_itimelimit = q->qi_itimelimit; + out->qs_rtbtimelimit = q->qi_rtbtimelimit; + out->qs_bwarnlimit = q->qi_bwarnlimit; + out->qs_iwarnlimit = q->qi_iwarnlimit; + } + return 0; +} + +#define XFS_DQ_MASK \ + (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) + +/* + * Adjust quota limits, and start/stop timers accordingly. + */ +int +xfs_qm_scall_setqlim( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + fs_disk_quota_t *newlim) +{ + struct xfs_quotainfo *q = mp->m_quotainfo; + xfs_disk_dquot_t *ddq; + xfs_dquot_t *dqp; + xfs_trans_t *tp; + int error; + xfs_qcnt_t hard, soft; + + if (newlim->d_fieldmask & ~XFS_DQ_MASK) + return EINVAL; + if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) + return 0; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); + if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, + 0, 0, XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + /* + * We don't want to race with a quotaoff so take the quotaoff lock. + * (We don't hold an inode lock, so there's nothing else to stop + * a quotaoff from happening). (XXXThis doesn't currently happen + * because we take the vfslock before calling xfs_qm_sysent). + */ + mutex_lock(&q->qi_quotaofflock); + + /* + * Get the dquot (locked), and join it to the transaction. + * Allocate the dquot if this doesn't exist. + */ + if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { + xfs_trans_cancel(tp, XFS_TRANS_ABORT); + ASSERT(error != ENOENT); + goto out_unlock; + } + xfs_trans_dqjoin(tp, dqp); + ddq = &dqp->q_core; + + /* + * Make sure that hardlimits are >= soft limits before changing. + */ + hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : + be64_to_cpu(ddq->d_blk_hardlimit); + soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? 
+ (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : + be64_to_cpu(ddq->d_blk_softlimit); + if (hard == 0 || hard >= soft) { + ddq->d_blk_hardlimit = cpu_to_be64(hard); + ddq->d_blk_softlimit = cpu_to_be64(soft); + if (id == 0) { + q->qi_bhardlimit = hard; + q->qi_bsoftlimit = soft; + } + } else { + xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft); + } + hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : + be64_to_cpu(ddq->d_rtb_hardlimit); + soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? + (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : + be64_to_cpu(ddq->d_rtb_softlimit); + if (hard == 0 || hard >= soft) { + ddq->d_rtb_hardlimit = cpu_to_be64(hard); + ddq->d_rtb_softlimit = cpu_to_be64(soft); + if (id == 0) { + q->qi_rtbhardlimit = hard; + q->qi_rtbsoftlimit = soft; + } + } else { + xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft); + } + + hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? + (xfs_qcnt_t) newlim->d_ino_hardlimit : + be64_to_cpu(ddq->d_ino_hardlimit); + soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? + (xfs_qcnt_t) newlim->d_ino_softlimit : + be64_to_cpu(ddq->d_ino_softlimit); + if (hard == 0 || hard >= soft) { + ddq->d_ino_hardlimit = cpu_to_be64(hard); + ddq->d_ino_softlimit = cpu_to_be64(soft); + if (id == 0) { + q->qi_ihardlimit = hard; + q->qi_isoftlimit = soft; + } + } else { + xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft); + } + + /* + * Update warnings counter(s) if requested + */ + if (newlim->d_fieldmask & FS_DQ_BWARNS) + ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); + if (newlim->d_fieldmask & FS_DQ_IWARNS) + ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); + if (newlim->d_fieldmask & FS_DQ_RTBWARNS) + ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); + + if (id == 0) { + /* + * Timelimits for the super user set the relative time + * the other users can be over quota for this file system. + * If it is zero a default is used. Ditto for the default + * soft and hard limit values (already done, above), and + * for warnings. + */ + if (newlim->d_fieldmask & FS_DQ_BTIMER) { + q->qi_btimelimit = newlim->d_btimer; + ddq->d_btimer = cpu_to_be32(newlim->d_btimer); + } + if (newlim->d_fieldmask & FS_DQ_ITIMER) { + q->qi_itimelimit = newlim->d_itimer; + ddq->d_itimer = cpu_to_be32(newlim->d_itimer); + } + if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { + q->qi_rtbtimelimit = newlim->d_rtbtimer; + ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); + } + if (newlim->d_fieldmask & FS_DQ_BWARNS) + q->qi_bwarnlimit = newlim->d_bwarns; + if (newlim->d_fieldmask & FS_DQ_IWARNS) + q->qi_iwarnlimit = newlim->d_iwarns; + if (newlim->d_fieldmask & FS_DQ_RTBWARNS) + q->qi_rtbwarnlimit = newlim->d_rtbwarns; + } else { + /* + * If the user is now over quota, start the timelimit. + * The user will not be 'warned'. + * Note that we keep the timers ticking, whether enforcement + * is on or off. We don't really want to bother with iterating + * over all ondisk dquots and turning the timers on/off. + */ + xfs_qm_adjust_dqtimers(mp, ddq); + } + dqp->dq_flags |= XFS_DQ_DIRTY; + xfs_trans_log_dquot(tp, dqp); + + error = xfs_trans_commit(tp, 0); + xfs_qm_dqrele(dqp); + + out_unlock: + mutex_unlock(&q->qi_quotaofflock); + return error; +} + +int +xfs_qm_scall_getquota( + xfs_mount_t *mp, + xfs_dqid_t id, + uint type, + fs_disk_quota_t *out) +{ + xfs_dquot_t *dqp; + int error; + + /* + * Try to get the dquot. We don't want it allocated on disk, so + * we aren't passing the XFS_QMOPT_DOALLOC flag. 
If it doesn't + * exist, we'll get ENOENT back. + */ + if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) { + return (error); + } + + /* + * If everything's NULL, this dquot doesn't quite exist as far as + * our utility programs are concerned. + */ + if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { + xfs_qm_dqput(dqp); + return XFS_ERROR(ENOENT); + } + /* + * Convert the disk dquot to the exportable format + */ + xfs_qm_export_dquot(mp, &dqp->q_core, out); + xfs_qm_dqput(dqp); + return (error ? XFS_ERROR(EFAULT) : 0); +} + + +STATIC int +xfs_qm_log_quotaoff_end( + xfs_mount_t *mp, + xfs_qoff_logitem_t *startqoff, + uint flags) +{ + xfs_trans_t *tp; + int error; + xfs_qoff_logitem_t *qoffi; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); + + if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2, + 0, 0, XFS_DEFAULT_LOG_COUNT))) { + xfs_trans_cancel(tp, 0); + return (error); + } + + qoffi = xfs_trans_get_qoff_item(tp, startqoff, + flags & XFS_ALL_QUOTA_ACCT); + xfs_trans_log_quotaoff_item(tp, qoffi); + + /* + * We have to make sure that the transaction is secure on disk before we + * return and actually stop quota accounting. So, make it synchronous. + * We don't care about quotoff's performance. + */ + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + return (error); +} + + +STATIC int +xfs_qm_log_quotaoff( + xfs_mount_t *mp, + xfs_qoff_logitem_t **qoffstartp, + uint flags) +{ + xfs_trans_t *tp; + int error; + xfs_qoff_logitem_t *qoffi=NULL; + uint oldsbqflag=0; + + tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); + if ((error = xfs_trans_reserve(tp, 0, + sizeof(xfs_qoff_logitem_t) * 2 + + mp->m_sb.sb_sectsize + 128, + 0, + 0, + XFS_DEFAULT_LOG_COUNT))) { + goto error0; + } + + qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); + xfs_trans_log_quotaoff_item(tp, qoffi); + + spin_lock(&mp->m_sb_lock); + oldsbqflag = mp->m_sb.sb_qflags; + mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; + spin_unlock(&mp->m_sb_lock); + + xfs_mod_sb(tp, XFS_SB_QFLAGS); + + /* + * We have to make sure that the transaction is secure on disk before we + * return and actually stop quota accounting. So, make it synchronous. + * We don't care about quotoff's performance. + */ + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + +error0: + if (error) { + xfs_trans_cancel(tp, 0); + /* + * No one else is modifying sb_qflags, so this is OK. + * We still hold the quotaofflock. + */ + spin_lock(&mp->m_sb_lock); + mp->m_sb.sb_qflags = oldsbqflag; + spin_unlock(&mp->m_sb_lock); + } + *qoffstartp = qoffi; + return (error); +} + + +/* + * Translate an internal style on-disk-dquot to the exportable format. + * The main differences are that the counters/limits are all in Basic + * Blocks (BBs) instead of the internal FSBs, and all on-disk data has + * to be converted to the native endianness. 
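+ * A basic block (BB) is 512 bytes, so with 4k filesystem blocks
+ * XFS_FSB_TO_BB() is a multiply by 8: an ondisk d_bcount of 25
+ * FSBs, for instance, is exported as 200 BBs.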
+ */ +STATIC void +xfs_qm_export_dquot( + xfs_mount_t *mp, + xfs_disk_dquot_t *src, + struct fs_disk_quota *dst) +{ + memset(dst, 0, sizeof(*dst)); + dst->d_version = FS_DQUOT_VERSION; /* different from src->d_version */ + dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags); + dst->d_id = be32_to_cpu(src->d_id); + dst->d_blk_hardlimit = + XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit)); + dst->d_blk_softlimit = + XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit)); + dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit); + dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit); + dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount)); + dst->d_icount = be64_to_cpu(src->d_icount); + dst->d_btimer = be32_to_cpu(src->d_btimer); + dst->d_itimer = be32_to_cpu(src->d_itimer); + dst->d_iwarns = be16_to_cpu(src->d_iwarns); + dst->d_bwarns = be16_to_cpu(src->d_bwarns); + dst->d_rtb_hardlimit = + XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit)); + dst->d_rtb_softlimit = + XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit)); + dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount)); + dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer); + dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns); + + /* + * Internally, we don't reset all the timers when quota enforcement + * gets turned off. No need to confuse the user level code, + * so return zeroes in that case. + */ + if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) || + (!XFS_IS_OQUOTA_ENFORCED(mp) && + (src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { + dst->d_btimer = 0; + dst->d_itimer = 0; + dst->d_rtbtimer = 0; + } + +#ifdef DEBUG + if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || + (XFS_IS_OQUOTA_ENFORCED(mp) && + (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && + dst->d_id != 0) { + if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) && + (dst->d_blk_softlimit > 0)) { + ASSERT(dst->d_btimer != 0); + } + if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) && + (dst->d_ino_softlimit > 0)) { + ASSERT(dst->d_itimer != 0); + } + } +#endif +} + +STATIC uint +xfs_qm_export_qtype_flags( + uint flags) +{ + /* + * Can't be more than one, or none. + */ + ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != + (FS_PROJ_QUOTA | FS_USER_QUOTA)); + ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != + (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); + ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != + (FS_USER_QUOTA | FS_GROUP_QUOTA)); + ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); + + return (flags & XFS_DQ_USER) ? + FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? + FS_PROJ_QUOTA : FS_GROUP_QUOTA; +} + +STATIC uint +xfs_qm_export_flags( + uint flags) +{ + uint uflags; + + uflags = 0; + if (flags & XFS_UQUOTA_ACCT) + uflags |= FS_QUOTA_UDQ_ACCT; + if (flags & XFS_PQUOTA_ACCT) + uflags |= FS_QUOTA_PDQ_ACCT; + if (flags & XFS_GQUOTA_ACCT) + uflags |= FS_QUOTA_GDQ_ACCT; + if (flags & XFS_UQUOTA_ENFD) + uflags |= FS_QUOTA_UDQ_ENFD; + if (flags & (XFS_OQUOTA_ENFD)) { + uflags |= (flags & XFS_GQUOTA_ACCT) ? 
+ FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD; + } + return (uflags); +} + + +STATIC int +xfs_dqrele_inode( + struct xfs_inode *ip, + struct xfs_perag *pag, + int flags) +{ + /* skip quota inodes */ + if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || + ip == ip->i_mount->m_quotainfo->qi_gquotaip) { + ASSERT(ip->i_udquot == NULL); + ASSERT(ip->i_gdquot == NULL); + return 0; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { + xfs_qm_dqrele(ip->i_udquot); + ip->i_udquot = NULL; + } + if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) { + xfs_qm_dqrele(ip->i_gdquot); + ip->i_gdquot = NULL; + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return 0; +} + + +/* + * Go thru all the inodes in the file system, releasing their dquots. + * + * Note that the mount structure gets modified to indicate that quotas are off + * AFTER this, in the case of quotaoff. + */ +void +xfs_qm_dqrele_all_inodes( + struct xfs_mount *mp, + uint flags) +{ + ASSERT(mp->m_quotainfo); + xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags); +} diff --git a/fs/xfs/xfs_quota_priv.h b/fs/xfs/xfs_quota_priv.h new file mode 100644 index 0000000..94a3d92 --- /dev/null +++ b/fs/xfs/xfs_quota_priv.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2000-2003 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_QUOTA_PRIV_H__ +#define __XFS_QUOTA_PRIV_H__ + +/* + * Number of bmaps that we ask from bmapi when doing a quotacheck. + * We make this restriction to keep the memory usage to a minimum. + */ +#define XFS_DQITER_MAP_SIZE 10 + +/* + * Hash into a bucket in the dquot hash table, based on . + */ +#define XFS_DQ_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ + (__psunsigned_t)(id)) & \ + (xfs_Gqm->qm_dqhashmask - 1)) +#define XFS_DQ_HASH(mp, id, type) (type == XFS_DQ_USER ? \ + (xfs_Gqm->qm_usr_dqhtable + \ + XFS_DQ_HASHVAL(mp, id)) : \ + (xfs_Gqm->qm_grp_dqhtable + \ + XFS_DQ_HASHVAL(mp, id))) +#define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ + !dqp->q_core.d_blk_hardlimit && \ + !dqp->q_core.d_blk_softlimit && \ + !dqp->q_core.d_rtb_hardlimit && \ + !dqp->q_core.d_rtb_softlimit && \ + !dqp->q_core.d_ino_hardlimit && \ + !dqp->q_core.d_ino_softlimit && \ + !dqp->q_core.d_bcount && \ + !dqp->q_core.d_rtbcount && \ + !dqp->q_core.d_icount) + +#define DQFLAGTO_TYPESTR(d) (((d)->dq_flags & XFS_DQ_USER) ? "USR" : \ + (((d)->dq_flags & XFS_DQ_GROUP) ? "GRP" : \ + (((d)->dq_flags & XFS_DQ_PROJ) ? "PRJ":"???"))) + +#endif /* __XFS_QUOTA_PRIV_H__ */ diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c new file mode 100644 index 0000000..7e76f53 --- /dev/null +++ b/fs/xfs/xfs_quotaops.c @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2008, Christoph Hellwig + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_qm.h"
+#include <linux/quota.h>
+
+
+STATIC int
+xfs_quota_type(int type)
+{
+	switch (type) {
+	case USRQUOTA:
+		return XFS_DQ_USER;
+	case GRPQUOTA:
+		return XFS_DQ_GROUP;
+	default:
+		return XFS_DQ_PROJ;
+	}
+}
+
+STATIC int
+xfs_fs_get_xstate(
+	struct super_block	*sb,
+	struct fs_quota_stat	*fqs)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	if (!XFS_IS_QUOTA_RUNNING(mp))
+		return -ENOSYS;
+	return -xfs_qm_scall_getqstat(mp, fqs);
+}
+
+STATIC int
+xfs_fs_set_xstate(
+	struct super_block	*sb,
+	unsigned int		uflags,
+	int			op)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+	unsigned int		flags = 0;
+
+	if (sb->s_flags & MS_RDONLY)
+		return -EROFS;
+	if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp))
+		return -ENOSYS;
+
+	if (uflags & FS_QUOTA_UDQ_ACCT)
+		flags |= XFS_UQUOTA_ACCT;
+	if (uflags & FS_QUOTA_PDQ_ACCT)
+		flags |= XFS_PQUOTA_ACCT;
+	if (uflags & FS_QUOTA_GDQ_ACCT)
+		flags |= XFS_GQUOTA_ACCT;
+	if (uflags & FS_QUOTA_UDQ_ENFD)
+		flags |= XFS_UQUOTA_ENFD;
+	if (uflags & (FS_QUOTA_PDQ_ENFD|FS_QUOTA_GDQ_ENFD))
+		flags |= XFS_OQUOTA_ENFD;
+
+	switch (op) {
+	case Q_XQUOTAON:
+		return -xfs_qm_scall_quotaon(mp, flags);
+	case Q_XQUOTAOFF:
+		if (!XFS_IS_QUOTA_ON(mp))
+			return -EINVAL;
+		return -xfs_qm_scall_quotaoff(mp, flags);
+	case Q_XQUOTARM:
+		if (XFS_IS_QUOTA_ON(mp))
+			return -EINVAL;
+		return -xfs_qm_scall_trunc_qfiles(mp, flags);
+	}
+
+	return -EINVAL;
+}
+
+STATIC int
+xfs_fs_get_dqblk(
+	struct super_block	*sb,
+	int			type,
+	qid_t			id,
+	struct fs_disk_quota	*fdq)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	if (!XFS_IS_QUOTA_RUNNING(mp))
+		return -ENOSYS;
+	if (!XFS_IS_QUOTA_ON(mp))
+		return -ESRCH;
+
+	return -xfs_qm_scall_getquota(mp, id, xfs_quota_type(type), fdq);
+}
+
+STATIC int
+xfs_fs_set_dqblk(
+	struct super_block	*sb,
+	int			type,
+	qid_t			id,
+	struct fs_disk_quota	*fdq)
+{
+	struct xfs_mount	*mp = XFS_M(sb);
+
+	if (sb->s_flags & MS_RDONLY)
+		return -EROFS;
+	if (!XFS_IS_QUOTA_RUNNING(mp))
+		return -ENOSYS;
+	if (!XFS_IS_QUOTA_ON(mp))
+		return -ESRCH;
+
+	return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq);
+}
+
+const struct quotactl_ops xfs_quotactl_operations = {
+	.get_xstate		= xfs_fs_get_xstate,
+	.set_xstate		= xfs_fs_set_xstate,
+	.get_dqblk		= xfs_fs_get_dqblk,
+	.set_dqblk		= xfs_fs_set_dqblk,
+};
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
new file mode 100644
index 0000000..76fdc58
--- /dev/null
+++ b/fs/xfs/xfs_stats.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include <linux/proc_fs.h>
+
+DEFINE_PER_CPU(struct xfsstats, xfsstats);
+
+static int xfs_stat_proc_show(struct seq_file *m, void *v)
+{
+	int		c, i, j, val;
+	__uint64_t	xs_xstrat_bytes = 0;
+	__uint64_t	xs_write_bytes = 0;
+	__uint64_t	xs_read_bytes = 0;
+
+	static const struct xstats_entry {
+		char	*desc;
+		int	endpoint;
+	} xstats[] = {
+		{ "extent_alloc",	XFSSTAT_END_EXTENT_ALLOC	},
+		{ "abt",		XFSSTAT_END_ALLOC_BTREE		},
+		{ "blk_map",		XFSSTAT_END_BLOCK_MAPPING	},
+		{ "bmbt",		XFSSTAT_END_BLOCK_MAP_BTREE	},
+		{ "dir",		XFSSTAT_END_DIRECTORY_OPS	},
+		{ "trans",		XFSSTAT_END_TRANSACTIONS	},
+		{ "ig",			XFSSTAT_END_INODE_OPS		},
+		{ "log",		XFSSTAT_END_LOG_OPS		},
+		{ "push_ail",		XFSSTAT_END_TAIL_PUSHING	},
+		{ "xstrat",		XFSSTAT_END_WRITE_CONVERT	},
+		{ "rw",			XFSSTAT_END_READ_WRITE_OPS	},
+		{ "attr",		XFSSTAT_END_ATTRIBUTE_OPS	},
+		{ "icluster",		XFSSTAT_END_INODE_CLUSTER	},
+		{ "vnodes",		XFSSTAT_END_VNODE_OPS		},
+		{ "buf",		XFSSTAT_END_BUF			},
+		{ "abtb2",		XFSSTAT_END_ABTB_V2		},
+		{ "abtc2",		XFSSTAT_END_ABTC_V2		},
+		{ "bmbt2",		XFSSTAT_END_BMBT_V2		},
+		{ "ibt2",		XFSSTAT_END_IBT_V2		},
+	};
+
+	/* Loop over all stats groups */
+	for (i = j = 0; i < ARRAY_SIZE(xstats); i++) {
+		seq_printf(m, "%s", xstats[i].desc);
+		/* inner loop does each group */
+		while (j < xstats[i].endpoint) {
+			val = 0;
+			/* sum over all cpus */
+			for_each_possible_cpu(c)
+				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
+			seq_printf(m, " %u", val);
+			j++;
+		}
+		seq_putc(m, '\n');
+	}
+	/* extra precision counters */
+	for_each_possible_cpu(i) {
+		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
+		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
+		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+	}
+
+	seq_printf(m, "xpc %Lu %Lu %Lu\n",
+			xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
+	seq_printf(m, "debug %u\n",
+#if defined(DEBUG)
+		1);
+#else
+		0);
+#endif
+	return 0;
+}
+
+static int xfs_stat_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, xfs_stat_proc_show, NULL);
+}
+
+static const struct file_operations xfs_stat_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= xfs_stat_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+int
+xfs_init_procfs(void)
+{
+	if (!proc_mkdir("fs/xfs", NULL))
+		goto out;
+
+	if (!proc_create("fs/xfs/stat", 0, NULL,
+			 &xfs_stat_proc_fops))
+		goto out_remove_entry;
+	return 0;
+
+ out_remove_entry:
+	remove_proc_entry("fs/xfs", NULL);
+ out:
+	return -ENOMEM;
+}
+
+void
+xfs_cleanup_procfs(void)
+{
+	remove_proc_entry("fs/xfs/stat", NULL);
+	remove_proc_entry("fs/xfs", NULL);
+}
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
new file mode 100644
index 0000000..736854b
--- /dev/null
+++ b/fs/xfs/xfs_stats.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2000,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_STATS_H__
+#define __XFS_STATS_H__
+
+
+#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
+
+#include <linux/percpu.h>
+
+/*
+ * XFS global statistics
+ */
+struct xfsstats {
+# define XFSSTAT_END_EXTENT_ALLOC	4
+	__uint32_t		xs_allocx;
+	__uint32_t		xs_allocb;
+	__uint32_t		xs_freex;
+	__uint32_t		xs_freeb;
+# define XFSSTAT_END_ALLOC_BTREE	(XFSSTAT_END_EXTENT_ALLOC+4)
+	__uint32_t		xs_abt_lookup;
+	__uint32_t		xs_abt_compare;
+	__uint32_t		xs_abt_insrec;
+	__uint32_t		xs_abt_delrec;
+# define XFSSTAT_END_BLOCK_MAPPING	(XFSSTAT_END_ALLOC_BTREE+7)
+	__uint32_t		xs_blk_mapr;
+	__uint32_t		xs_blk_mapw;
+	__uint32_t		xs_blk_unmap;
+	__uint32_t		xs_add_exlist;
+	__uint32_t		xs_del_exlist;
+	__uint32_t		xs_look_exlist;
+	__uint32_t		xs_cmp_exlist;
+# define XFSSTAT_END_BLOCK_MAP_BTREE	(XFSSTAT_END_BLOCK_MAPPING+4)
+	__uint32_t		xs_bmbt_lookup;
+	__uint32_t		xs_bmbt_compare;
+	__uint32_t		xs_bmbt_insrec;
+	__uint32_t		xs_bmbt_delrec;
+# define XFSSTAT_END_DIRECTORY_OPS	(XFSSTAT_END_BLOCK_MAP_BTREE+4)
+	__uint32_t		xs_dir_lookup;
+	__uint32_t		xs_dir_create;
+	__uint32_t		xs_dir_remove;
+	__uint32_t		xs_dir_getdents;
+# define XFSSTAT_END_TRANSACTIONS	(XFSSTAT_END_DIRECTORY_OPS+3)
+	__uint32_t		xs_trans_sync;
+	__uint32_t		xs_trans_async;
+	__uint32_t		xs_trans_empty;
+# define XFSSTAT_END_INODE_OPS		(XFSSTAT_END_TRANSACTIONS+7)
+	__uint32_t		xs_ig_attempts;
+	__uint32_t		xs_ig_found;
+	__uint32_t		xs_ig_frecycle;
+	__uint32_t		xs_ig_missed;
+	__uint32_t		xs_ig_dup;
+	__uint32_t		xs_ig_reclaims;
+	__uint32_t		xs_ig_attrchg;
+# define XFSSTAT_END_LOG_OPS		(XFSSTAT_END_INODE_OPS+5)
+	__uint32_t		xs_log_writes;
+	__uint32_t		xs_log_blocks;
+	__uint32_t		xs_log_noiclogs;
+	__uint32_t		xs_log_force;
+	__uint32_t		xs_log_force_sleep;
+# define XFSSTAT_END_TAIL_PUSHING	(XFSSTAT_END_LOG_OPS+10)
+	__uint32_t		xs_try_logspace;
+	__uint32_t		xs_sleep_logspace;
+	__uint32_t		xs_push_ail;
+	__uint32_t		xs_push_ail_success;
+	__uint32_t		xs_push_ail_pushbuf;
+	__uint32_t		xs_push_ail_pinned;
+	__uint32_t		xs_push_ail_locked;
+	__uint32_t		xs_push_ail_flushing;
+	__uint32_t		xs_push_ail_restarts;
+	__uint32_t		xs_push_ail_flush;
+# define XFSSTAT_END_WRITE_CONVERT	(XFSSTAT_END_TAIL_PUSHING+2)
+	__uint32_t		xs_xstrat_quick;
+	__uint32_t		xs_xstrat_split;
+# define XFSSTAT_END_READ_WRITE_OPS	(XFSSTAT_END_WRITE_CONVERT+2)
+	__uint32_t		xs_write_calls;
+	__uint32_t		xs_read_calls;
+# define XFSSTAT_END_ATTRIBUTE_OPS	(XFSSTAT_END_READ_WRITE_OPS+4)
+	__uint32_t		xs_attr_get;
+	__uint32_t		xs_attr_set;
+	__uint32_t		xs_attr_remove;
+	__uint32_t		xs_attr_list;
+# define XFSSTAT_END_INODE_CLUSTER	(XFSSTAT_END_ATTRIBUTE_OPS+3)
+	__uint32_t		xs_iflush_count;
+	__uint32_t		xs_icluster_flushcnt;
+	__uint32_t		xs_icluster_flushinode;
+# define XFSSTAT_END_VNODE_OPS		(XFSSTAT_END_INODE_CLUSTER+8)
+	__uint32_t		vn_active;	/* # vnodes not on free lists */
+	__uint32_t		vn_alloc;	/* # times vn_alloc called */
+	__uint32_t		vn_get;		/* # times vn_get called */
+	__uint32_t		vn_hold;	/* # times vn_hold called */
+	__uint32_t		vn_rele;	/* # times vn_rele called */
+	__uint32_t		vn_reclaim;	/* # times vn_reclaim called */
+	__uint32_t		vn_remove;	/* # times vn_remove called */
+	__uint32_t		vn_free;	/* # times vn_free called */
+#define XFSSTAT_END_BUF			(XFSSTAT_END_VNODE_OPS+9)
+	__uint32_t		xb_get;
+	__uint32_t		xb_create;
+	__uint32_t		xb_get_locked;
+	__uint32_t		xb_get_locked_waited;
+	__uint32_t		xb_busy_locked;
+	__uint32_t		xb_miss_locked;
+	__uint32_t		xb_page_retries;
+	__uint32_t		xb_page_found;
+	__uint32_t		xb_get_read;
+/* Version 2 btree counters */
+#define XFSSTAT_END_ABTB_V2		(XFSSTAT_END_BUF+15)
+	__uint32_t		xs_abtb_2_lookup;
+	__uint32_t		xs_abtb_2_compare;
+	__uint32_t		xs_abtb_2_insrec;
+	__uint32_t		xs_abtb_2_delrec;
+	__uint32_t		xs_abtb_2_newroot;
+	__uint32_t		xs_abtb_2_killroot;
+	__uint32_t		xs_abtb_2_increment;
+	__uint32_t		xs_abtb_2_decrement;
+	__uint32_t		xs_abtb_2_lshift;
+	__uint32_t		xs_abtb_2_rshift;
+	__uint32_t		xs_abtb_2_split;
+	__uint32_t		xs_abtb_2_join;
+	__uint32_t		xs_abtb_2_alloc;
+	__uint32_t		xs_abtb_2_free;
+	__uint32_t		xs_abtb_2_moves;
+#define XFSSTAT_END_ABTC_V2		(XFSSTAT_END_ABTB_V2+15)
+	__uint32_t		xs_abtc_2_lookup;
+	__uint32_t		xs_abtc_2_compare;
+	__uint32_t		xs_abtc_2_insrec;
+	__uint32_t		xs_abtc_2_delrec;
+	__uint32_t		xs_abtc_2_newroot;
+	__uint32_t		xs_abtc_2_killroot;
+	__uint32_t		xs_abtc_2_increment;
+	__uint32_t		xs_abtc_2_decrement;
+	__uint32_t		xs_abtc_2_lshift;
+	__uint32_t		xs_abtc_2_rshift;
+	__uint32_t		xs_abtc_2_split;
+	__uint32_t		xs_abtc_2_join;
+	__uint32_t		xs_abtc_2_alloc;
+	__uint32_t		xs_abtc_2_free;
+	__uint32_t		xs_abtc_2_moves;
+#define XFSSTAT_END_BMBT_V2		(XFSSTAT_END_ABTC_V2+15)
+	__uint32_t		xs_bmbt_2_lookup;
+	__uint32_t		xs_bmbt_2_compare;
+	__uint32_t		xs_bmbt_2_insrec;
+	__uint32_t		xs_bmbt_2_delrec;
+	__uint32_t		xs_bmbt_2_newroot;
+	__uint32_t		xs_bmbt_2_killroot;
+	__uint32_t		xs_bmbt_2_increment;
+	__uint32_t		xs_bmbt_2_decrement;
+	__uint32_t		xs_bmbt_2_lshift;
+	__uint32_t		xs_bmbt_2_rshift;
+	__uint32_t		xs_bmbt_2_split;
+	__uint32_t		xs_bmbt_2_join;
+	__uint32_t		xs_bmbt_2_alloc;
+	__uint32_t		xs_bmbt_2_free;
+	__uint32_t		xs_bmbt_2_moves;
+#define XFSSTAT_END_IBT_V2		(XFSSTAT_END_BMBT_V2+15)
+	__uint32_t		xs_ibt_2_lookup;
+	__uint32_t		xs_ibt_2_compare;
+	__uint32_t		xs_ibt_2_insrec;
+	__uint32_t		xs_ibt_2_delrec;
+	__uint32_t		xs_ibt_2_newroot;
+	__uint32_t		xs_ibt_2_killroot;
+	__uint32_t		xs_ibt_2_increment;
+	__uint32_t		xs_ibt_2_decrement;
+	__uint32_t		xs_ibt_2_lshift;
+	__uint32_t		xs_ibt_2_rshift;
+	__uint32_t		xs_ibt_2_split;
+	__uint32_t		xs_ibt_2_join;
+	__uint32_t		xs_ibt_2_alloc;
+	__uint32_t		xs_ibt_2_free;
+	__uint32_t		xs_ibt_2_moves;
+/* Extra precision counters */
+	__uint64_t		xs_xstrat_bytes;
+	__uint64_t		xs_write_bytes;
+	__uint64_t		xs_read_bytes;
+};
+
+DECLARE_PER_CPU(struct xfsstats, xfsstats);
+
+/*
+ * We don't disable preempt, not too worried about poking the
+ * wrong CPU's stat for now (also aggregated before reporting).
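+ *
+ * Illustrative use of these macros (counter names taken from the
+ * structure above): the inode lookup fast path would bump its
+ * counter with
+ *
+ *	XFS_STATS_INC(xs_ig_found);
+ *
+ * and the read path accounts data moved with
+ *
+ *	XFS_STATS_ADD(xs_read_bytes, ret);
+ *
+ * A lost or misplaced increment under preemption only skews the
+ * statistics slightly, which is the trade-off accepted here.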
+ */
+#define XFS_STATS_INC(v)	(per_cpu(xfsstats, current_cpu()).v++)
+#define XFS_STATS_DEC(v)	(per_cpu(xfsstats, current_cpu()).v--)
+#define XFS_STATS_ADD(v, inc)	(per_cpu(xfsstats, current_cpu()).v += (inc))
+
+extern int xfs_init_procfs(void);
+extern void xfs_cleanup_procfs(void);
+
+
+#else	/* !CONFIG_PROC_FS */
+
+# define XFS_STATS_INC(count)
+# define XFS_STATS_DEC(count)
+# define XFS_STATS_ADD(count, inc)
+
+static inline int xfs_init_procfs(void)
+{
+	return 0;
+}
+
+static inline void xfs_cleanup_procfs(void)
+{
+}
+
+#endif	/* !CONFIG_PROC_FS */
+
+#endif	/* __XFS_STATS_H__ */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
new file mode 100644
index 0000000..9a72dda
--- /dev/null
+++ b/fs/xfs/xfs_super.c
@@ -0,0 +1,1773 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include "xfs.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_bmap.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_fsops.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_vnodeops.h"
+#include "xfs_log_priv.h"
+#include "xfs_trans_priv.h"
+#include "xfs_filestream.h"
+#include "xfs_da_btree.h"
+#include "xfs_extfree_item.h"
+#include "xfs_mru_cache.h"
+#include "xfs_inode_item.h"
+#include "xfs_sync.h"
+#include "xfs_trace.h"
+
+#include <linux/namei.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mount.h>
+#include <linux/mempool.h>
+#include <linux/writeback.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/parser.h>
+
+static const struct super_operations xfs_super_operations;
+static kmem_zone_t *xfs_ioend_zone;
+mempool_t *xfs_ioend_pool;
+
+#define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
+#define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
+#define MNTOPT_LOGDEV	"logdev"	/* log device */
+#define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
+#define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
+#define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
+#define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
+#define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
+#define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
+#define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
+#define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
+#define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
+#define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
+#define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
+#define MNTOPT_BSDGROUPS    "bsdgroups"    /*
group-ID from parent directory */ +#define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ +#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ +#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ +#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and + * unwritten extent conversion */ +#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ +#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ +#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ +#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ +#define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ +#define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes + * in stat(). */ +#define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ +#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ +#define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ +#define MNTOPT_QUOTA "quota" /* disk quotas (user) */ +#define MNTOPT_NOQUOTA "noquota" /* no quotas */ +#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ +#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ +#define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ +#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ +#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ +#define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ +#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ +#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ +#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ +#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ +#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ +#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ +#define MNTOPT_DISCARD "discard" /* Discard unused blocks */ +#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ + +/* + * Table driven mount option parser. + * + * Currently only used for remount, but it will be used for mount + * in the future, too. + */ +enum { + Opt_barrier, Opt_nobarrier, Opt_err +}; + +static const match_table_t tokens = { + {Opt_barrier, "barrier"}, + {Opt_nobarrier, "nobarrier"}, + {Opt_err, NULL} +}; + + +STATIC unsigned long +suffix_strtoul(char *s, char **endp, unsigned int base) +{ + int last, shift_left_factor = 0; + char *value = s; + + last = strlen(value) - 1; + if (value[last] == 'K' || value[last] == 'k') { + shift_left_factor = 10; + value[last] = '\0'; + } + if (value[last] == 'M' || value[last] == 'm') { + shift_left_factor = 20; + value[last] = '\0'; + } + if (value[last] == 'G' || value[last] == 'g') { + shift_left_factor = 30; + value[last] = '\0'; + } + + return simple_strtoul((const char *)s, endp, base) << shift_left_factor; +} + +/* + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock has _not_ yet been read in. + * + * Note that this function leaks the various device name allocations on + * failure. The caller takes care of them. + */ +STATIC int +xfs_parseargs( + struct xfs_mount *mp, + char *options) +{ + struct super_block *sb = mp->m_super; + char *this_char, *value, *eov; + int dsunit = 0; + int dswidth = 0; + int iosize = 0; + __uint8_t iosizelog = 0; + + /* + * set up the mount name first so all the errors will refer to the + * correct device. 
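+	 * For example, every xfs_warn(mp, ...) issued while parsing the
+	 * options below is prefixed with this name, so a bad option is
+	 * reported against the actual device rather than anonymously.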
+ */ + mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); + if (!mp->m_fsname) + return ENOMEM; + mp->m_fsname_len = strlen(mp->m_fsname) + 1; + + /* + * Copy binary VFS mount flags we are interested in. + */ + if (sb->s_flags & MS_RDONLY) + mp->m_flags |= XFS_MOUNT_RDONLY; + if (sb->s_flags & MS_DIRSYNC) + mp->m_flags |= XFS_MOUNT_DIRSYNC; + if (sb->s_flags & MS_SYNCHRONOUS) + mp->m_flags |= XFS_MOUNT_WSYNC; + + /* + * Set some default flags that could be cleared by the mount option + * parsing. + */ + mp->m_flags |= XFS_MOUNT_BARRIER; + mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; + mp->m_flags |= XFS_MOUNT_SMALL_INUMS; + mp->m_flags |= XFS_MOUNT_DELAYLOG; + + /* + * These can be overridden by the mount option parsing. + */ + mp->m_logbufs = -1; + mp->m_logbsize = -1; + + if (!options) + goto done; + + while ((this_char = strsep(&options, ",")) != NULL) { + if (!*this_char) + continue; + if ((value = strchr(this_char, '=')) != NULL) + *value++ = 0; + + if (!strcmp(this_char, MNTOPT_LOGBUFS)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + mp->m_logbufs = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + mp->m_logbsize = suffix_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); + if (!mp->m_logname) + return ENOMEM; + } else if (!strcmp(this_char, MNTOPT_MTPT)) { + xfs_warn(mp, "%s option not allowed on this system", + this_char); + return EINVAL; + } else if (!strcmp(this_char, MNTOPT_RTDEV)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); + if (!mp->m_rtname) + return ENOMEM; + } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + iosize = simple_strtoul(value, &eov, 10); + iosizelog = ffs(iosize) - 1; + } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + iosize = suffix_strtoul(value, &eov, 10); + iosizelog = ffs(iosize) - 1; + } else if (!strcmp(this_char, MNTOPT_GRPID) || + !strcmp(this_char, MNTOPT_BSDGROUPS)) { + mp->m_flags |= XFS_MOUNT_GRPID; + } else if (!strcmp(this_char, MNTOPT_NOGRPID) || + !strcmp(this_char, MNTOPT_SYSVGROUPS)) { + mp->m_flags &= ~XFS_MOUNT_GRPID; + } else if (!strcmp(this_char, MNTOPT_WSYNC)) { + mp->m_flags |= XFS_MOUNT_WSYNC; + } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { + mp->m_flags |= XFS_MOUNT_NORECOVERY; + } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { + mp->m_flags |= XFS_MOUNT_NOALIGN; + } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { + mp->m_flags |= XFS_MOUNT_SWALLOC; + } else if (!strcmp(this_char, MNTOPT_SUNIT)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + dsunit = simple_strtoul(value, &eov, 10); + } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { + if (!value || !*value) { + xfs_warn(mp, "%s option requires an argument", + this_char); + return EINVAL; + } + dswidth = simple_strtoul(value, &eov, 10); + } else if 
(!strcmp(this_char, MNTOPT_64BITINODE)) { + mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; +#if !XFS_BIG_INUMS + xfs_warn(mp, "%s option not allowed on this system", + this_char); + return EINVAL; +#endif + } else if (!strcmp(this_char, MNTOPT_NOUUID)) { + mp->m_flags |= XFS_MOUNT_NOUUID; + } else if (!strcmp(this_char, MNTOPT_BARRIER)) { + mp->m_flags |= XFS_MOUNT_BARRIER; + } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { + mp->m_flags &= ~XFS_MOUNT_BARRIER; + } else if (!strcmp(this_char, MNTOPT_IKEEP)) { + mp->m_flags |= XFS_MOUNT_IKEEP; + } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { + mp->m_flags &= ~XFS_MOUNT_IKEEP; + } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { + mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; + } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { + mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; + } else if (!strcmp(this_char, MNTOPT_ATTR2)) { + mp->m_flags |= XFS_MOUNT_ATTR2; + } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { + mp->m_flags &= ~XFS_MOUNT_ATTR2; + mp->m_flags |= XFS_MOUNT_NOATTR2; + } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { + mp->m_flags |= XFS_MOUNT_FILESTREAMS; + } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { + mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | + XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | + XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); + } else if (!strcmp(this_char, MNTOPT_QUOTA) || + !strcmp(this_char, MNTOPT_UQUOTA) || + !strcmp(this_char, MNTOPT_USRQUOTA)) { + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | + XFS_UQUOTA_ENFD); + } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || + !strcmp(this_char, MNTOPT_UQUOTANOENF)) { + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); + mp->m_qflags &= ~XFS_UQUOTA_ENFD; + } else if (!strcmp(this_char, MNTOPT_PQUOTA) || + !strcmp(this_char, MNTOPT_PRJQUOTA)) { + mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | + XFS_OQUOTA_ENFD); + } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { + mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); + mp->m_qflags &= ~XFS_OQUOTA_ENFD; + } else if (!strcmp(this_char, MNTOPT_GQUOTA) || + !strcmp(this_char, MNTOPT_GRPQUOTA)) { + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + XFS_OQUOTA_ENFD); + } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); + mp->m_qflags &= ~XFS_OQUOTA_ENFD; + } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) { + mp->m_flags |= XFS_MOUNT_DELAYLOG; + } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { + mp->m_flags &= ~XFS_MOUNT_DELAYLOG; + } else if (!strcmp(this_char, MNTOPT_DISCARD)) { + mp->m_flags |= XFS_MOUNT_DISCARD; + } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { + mp->m_flags &= ~XFS_MOUNT_DISCARD; + } else if (!strcmp(this_char, "ihashsize")) { + xfs_warn(mp, + "ihashsize no longer used, option is deprecated."); + } else if (!strcmp(this_char, "osyncisdsync")) { + xfs_warn(mp, + "osyncisdsync has no effect, option is deprecated."); + } else if (!strcmp(this_char, "osyncisosync")) { + xfs_warn(mp, + "osyncisosync has no effect, option is deprecated."); + } else if (!strcmp(this_char, "irixsgid")) { + xfs_warn(mp, + "irixsgid is now a sysctl(2) variable, option is deprecated."); + } else { + xfs_warn(mp, "unknown mount option [%s].", this_char); + return EINVAL; + } + } + + /* + * no recovery flag requires a read-only mount + */ + if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && + !(mp->m_flags & XFS_MOUNT_RDONLY)) { + xfs_warn(mp, "no-recovery mounts must be read-only."); + return EINVAL; + } + + if 
((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { + xfs_warn(mp, + "sunit and swidth options incompatible with the noalign option"); + return EINVAL; + } + + if ((mp->m_flags & XFS_MOUNT_DISCARD) && + !(mp->m_flags & XFS_MOUNT_DELAYLOG)) { + xfs_warn(mp, + "the discard option is incompatible with the nodelaylog option"); + return EINVAL; + } + +#ifndef CONFIG_XFS_QUOTA + if (XFS_IS_QUOTA_RUNNING(mp)) { + xfs_warn(mp, "quota support not available in this kernel."); + return EINVAL; + } +#endif + + if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && + (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { + xfs_warn(mp, "cannot mount with both project and group quota"); + return EINVAL; + } + + if ((dsunit && !dswidth) || (!dsunit && dswidth)) { + xfs_warn(mp, "sunit and swidth must be specified together"); + return EINVAL; + } + + if (dsunit && (dswidth % dsunit != 0)) { + xfs_warn(mp, + "stripe width (%d) must be a multiple of the stripe unit (%d)", + dswidth, dsunit); + return EINVAL; + } + +done: + if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { + /* + * At this point the superblock has not been read + * in, therefore we do not know the block size. + * Before the mount call ends we will convert + * these to FSBs. + */ + if (dsunit) { + mp->m_dalign = dsunit; + mp->m_flags |= XFS_MOUNT_RETERR; + } + + if (dswidth) + mp->m_swidth = dswidth; + } + + if (mp->m_logbufs != -1 && + mp->m_logbufs != 0 && + (mp->m_logbufs < XLOG_MIN_ICLOGS || + mp->m_logbufs > XLOG_MAX_ICLOGS)) { + xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", + mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); + return XFS_ERROR(EINVAL); + } + if (mp->m_logbsize != -1 && + mp->m_logbsize != 0 && + (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || + mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || + !is_power_of_2(mp->m_logbsize))) { + xfs_warn(mp, + "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", + mp->m_logbsize); + return XFS_ERROR(EINVAL); + } + + if (iosizelog) { + if (iosizelog > XFS_MAX_IO_LOG || + iosizelog < XFS_MIN_IO_LOG) { + xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", + iosizelog, XFS_MIN_IO_LOG, + XFS_MAX_IO_LOG); + return XFS_ERROR(EINVAL); + } + + mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; + mp->m_readio_log = iosizelog; + mp->m_writeio_log = iosizelog; + } + + return 0; +} + +struct proc_xfs_info { + int flag; + char *str; +}; + +STATIC int +xfs_showargs( + struct xfs_mount *mp, + struct seq_file *m) +{ + static struct proc_xfs_info xfs_info_set[] = { + /* the few simple ones we can get from the mount struct */ + { XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP }, + { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, + { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, + { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, + { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, + { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, + { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, + { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, + { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, + { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, + { XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD }, + { 0, NULL } + }; + static struct proc_xfs_info xfs_info_unset[] = { + /* the few simple ones we can get from the mount struct */ + { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, + { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, + { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, + { 0, NULL } + }; + struct proc_xfs_info *xfs_infop; + + for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { + if (mp->m_flags & xfs_infop->flag) + seq_puts(m, xfs_infop->str); + } + for (xfs_infop = 
xfs_info_unset; xfs_infop->flag; xfs_infop++) { + if (!(mp->m_flags & xfs_infop->flag)) + seq_puts(m, xfs_infop->str); + } + + if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) + seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", + (int)(1 << mp->m_writeio_log) >> 10); + + if (mp->m_logbufs > 0) + seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); + if (mp->m_logbsize > 0) + seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); + + if (mp->m_logname) + seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); + if (mp->m_rtname) + seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); + + if (mp->m_dalign > 0) + seq_printf(m, "," MNTOPT_SUNIT "=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); + if (mp->m_swidth > 0) + seq_printf(m, "," MNTOPT_SWIDTH "=%d", + (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); + + if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) + seq_puts(m, "," MNTOPT_USRQUOTA); + else if (mp->m_qflags & XFS_UQUOTA_ACCT) + seq_puts(m, "," MNTOPT_UQUOTANOENF); + + /* Either project or group quotas can be active, not both */ + + if (mp->m_qflags & XFS_PQUOTA_ACCT) { + if (mp->m_qflags & XFS_OQUOTA_ENFD) + seq_puts(m, "," MNTOPT_PRJQUOTA); + else + seq_puts(m, "," MNTOPT_PQUOTANOENF); + } else if (mp->m_qflags & XFS_GQUOTA_ACCT) { + if (mp->m_qflags & XFS_OQUOTA_ENFD) + seq_puts(m, "," MNTOPT_GRPQUOTA); + else + seq_puts(m, "," MNTOPT_GQUOTANOENF); + } + + if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) + seq_puts(m, "," MNTOPT_NOQUOTA); + + return 0; +} +__uint64_t +xfs_max_file_offset( + unsigned int blockshift) +{ + unsigned int pagefactor = 1; + unsigned int bitshift = BITS_PER_LONG - 1; + + /* Figure out maximum filesize, on Linux this can depend on + * the filesystem blocksize (on 32 bit platforms). + * __block_write_begin does this in an [unsigned] long... + * page->index << (PAGE_CACHE_SHIFT - bbits) + * So, for page sized blocks (4K on 32 bit platforms), + * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is + * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) + * but for smaller blocksizes it is less (bbits = log2 bsize). + * Note1: get_block_t takes a long (implicit cast from above) + * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch + * can optionally convert the [unsigned] long from above into + * an [unsigned] long long. 
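	 *
	 * Worked example: with CONFIG_LBDAF unset and 4k blocks
	 * (blockshift == 12 == PAGE_CACHE_SHIFT), pagefactor is 4096 and
	 * bitshift stays at 31, so the limit is (4096 << 31) - 1 = 2^43 - 1,
	 * the ~8Tb figure quoted above.  For 1k blocks pagefactor drops to
	 * 1024 and the limit to 2^41 - 1 (~2Tb).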
+ */ + +#if BITS_PER_LONG == 32 +# if defined(CONFIG_LBDAF) + ASSERT(sizeof(sector_t) == 8); + pagefactor = PAGE_CACHE_SIZE; + bitshift = BITS_PER_LONG; +# else + pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); +# endif +#endif + + return (((__uint64_t)pagefactor) << bitshift) - 1; +} + +STATIC int +xfs_blkdev_get( + xfs_mount_t *mp, + const char *name, + struct block_device **bdevp) +{ + int error = 0; + + *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + mp); + if (IS_ERR(*bdevp)) { + error = PTR_ERR(*bdevp); + xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); + } + + return -error; +} + +STATIC void +xfs_blkdev_put( + struct block_device *bdev) +{ + if (bdev) + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); +} + +void +xfs_blkdev_issue_flush( + xfs_buftarg_t *buftarg) +{ + blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL); +} + +STATIC void +xfs_close_devices( + struct xfs_mount *mp) +{ + if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { + struct block_device *logdev = mp->m_logdev_targp->bt_bdev; + xfs_free_buftarg(mp, mp->m_logdev_targp); + xfs_blkdev_put(logdev); + } + if (mp->m_rtdev_targp) { + struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; + xfs_free_buftarg(mp, mp->m_rtdev_targp); + xfs_blkdev_put(rtdev); + } + xfs_free_buftarg(mp, mp->m_ddev_targp); +} + +/* + * The file system configurations are: + * (1) device (partition) with data and internal log + * (2) logical volume with data and log subvolumes. + * (3) logical volume with data, log, and realtime subvolumes. + * + * We only have to handle opening the log and realtime volumes here if + * they are present. The data subvolume has already been opened by + * get_sb_bdev() and is stored in sb->s_bdev. + */ +STATIC int +xfs_open_devices( + struct xfs_mount *mp) +{ + struct block_device *ddev = mp->m_super->s_bdev; + struct block_device *logdev = NULL, *rtdev = NULL; + int error; + + /* + * Open real time and log devices - order is important. 
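+	 * The log device is opened before the realtime device so that the
+	 * error unwinding below (out_close_rtdev falling through to
+	 * out_close_logdev) can release them in the reverse order.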
+ */ + if (mp->m_logname) { + error = xfs_blkdev_get(mp, mp->m_logname, &logdev); + if (error) + goto out; + } + + if (mp->m_rtname) { + error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); + if (error) + goto out_close_logdev; + + if (rtdev == ddev || rtdev == logdev) { + xfs_warn(mp, + "Cannot mount filesystem with identical rtdev and ddev/logdev."); + error = EINVAL; + goto out_close_rtdev; + } + } + + /* + * Setup xfs_mount buffer target pointers + */ + error = ENOMEM; + mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname); + if (!mp->m_ddev_targp) + goto out_close_rtdev; + + if (rtdev) { + mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1, + mp->m_fsname); + if (!mp->m_rtdev_targp) + goto out_free_ddev_targ; + } + + if (logdev && logdev != ddev) { + mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1, + mp->m_fsname); + if (!mp->m_logdev_targp) + goto out_free_rtdev_targ; + } else { + mp->m_logdev_targp = mp->m_ddev_targp; + } + + return 0; + + out_free_rtdev_targ: + if (mp->m_rtdev_targp) + xfs_free_buftarg(mp, mp->m_rtdev_targp); + out_free_ddev_targ: + xfs_free_buftarg(mp, mp->m_ddev_targp); + out_close_rtdev: + if (rtdev) + xfs_blkdev_put(rtdev); + out_close_logdev: + if (logdev && logdev != ddev) + xfs_blkdev_put(logdev); + out: + return error; +} + +/* + * Setup xfs_mount buffer target pointers based on superblock + */ +STATIC int +xfs_setup_devices( + struct xfs_mount *mp) +{ + int error; + + error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize, + mp->m_sb.sb_sectsize); + if (error) + return error; + + if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { + unsigned int log_sector_size = BBSIZE; + + if (xfs_sb_version_hassector(&mp->m_sb)) + log_sector_size = mp->m_sb.sb_logsectsize; + error = xfs_setsize_buftarg(mp->m_logdev_targp, + mp->m_sb.sb_blocksize, + log_sector_size); + if (error) + return error; + } + if (mp->m_rtdev_targp) { + error = xfs_setsize_buftarg(mp->m_rtdev_targp, + mp->m_sb.sb_blocksize, + mp->m_sb.sb_sectsize); + if (error) + return error; + } + + return 0; +} + +/* Catch misguided souls that try to use this interface on XFS */ +STATIC struct inode * +xfs_fs_alloc_inode( + struct super_block *sb) +{ + BUG(); + return NULL; +} + +/* + * Now that the generic code is guaranteed not to be accessing + * the linux inode, we can reclaim the inode. + */ +STATIC void +xfs_fs_destroy_inode( + struct inode *inode) +{ + struct xfs_inode *ip = XFS_I(inode); + + trace_xfs_destroy_inode(ip); + + XFS_STATS_INC(vn_reclaim); + + /* bad inode, get out here ASAP */ + if (is_bad_inode(inode)) + goto out_reclaim; + + xfs_ioend_wait(ip); + + ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); + + /* + * We should never get here with one of the reclaim flags already set. + */ + ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); + ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM)); + + /* + * We always use background reclaim here because even if the + * inode is clean, it still may be under IO and hence we have + * to take the flush lock. The background reclaim path handles + * this more efficiently than we can here, so simply let background + * reclaim tear down all inodes. + */ +out_reclaim: + xfs_inode_set_reclaim_tag(ip); +} + +/* + * Slab object creation initialisation for the XFS inode. + * This covers only the idempotent fields in the XFS inode; + * all other fields need to be initialised on allocation + * from the slab. 
This avoids the need to repeatedly initialise
+ * fields in the xfs inode that are left in the initialised state
+ * when freeing the inode.
+ */
+STATIC void
+xfs_fs_inode_init_once(
+	void			*inode)
+{
+	struct xfs_inode	*ip = inode;
+
+	memset(ip, 0, sizeof(struct xfs_inode));
+
+	/* vfs inode */
+	inode_init_once(VFS_I(ip));
+
+	/* xfs inode */
+	atomic_set(&ip->i_iocount, 0);
+	atomic_set(&ip->i_pincount, 0);
+	spin_lock_init(&ip->i_flags_lock);
+	init_waitqueue_head(&ip->i_ipin_wait);
+	/*
+	 * Because we want to use a counting completion, complete
+	 * the flush completion once to allow a single access to
+	 * the flush completion without blocking.
+	 */
+	init_completion(&ip->i_flush);
+	complete(&ip->i_flush);
+
+	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
+		     "xfsino", ip->i_ino);
+}
+
+/*
+ * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
+ * we catch unlogged VFS level updates to the inode.
+ *
+ * We need the barrier() to maintain correct ordering between unlogged
+ * updates and the transaction commit code that clears the i_update_core
+ * field. This requires all updates to be completed before marking the
+ * inode dirty.
+ */
+STATIC void
+xfs_fs_dirty_inode(
+	struct inode	*inode,
+	int		flags)
+{
+	barrier();
+	XFS_I(inode)->i_update_core = 1;
+}
+
+STATIC int
+xfs_log_inode(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
+	int			error;
+
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+
+	if (error) {
+		xfs_trans_cancel(tp, 0);
+		/* we need to return with the lock hold shared */
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+		return error;
+	}
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	/*
+	 * Note - it's possible that we might have pushed ourselves out of the
+	 * way during trans_reserve which would flush the inode.  But there's
+	 * no guarantee that the inode buffer has actually gone out yet (it's
+	 * delwri).  Plus the buffer could be pinned anyway if it's part of
+	 * an inode in another recent transaction.  So we play it safe and
+	 * fire off the transaction anyway.
+	 */
+	xfs_trans_ijoin(tp, ip);
+	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+	error = xfs_trans_commit(tp, 0);
+	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+
+	return error;
+}
+
+STATIC int
+xfs_fs_write_inode(
+	struct inode		*inode,
+	struct writeback_control *wbc)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	int			error = EAGAIN;
+
+	trace_xfs_write_inode(ip);
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return XFS_ERROR(EIO);
+
+	if (wbc->sync_mode == WB_SYNC_ALL) {
+		/*
+		 * Make sure the inode has made it into the log.  Instead
+		 * of forcing it all the way to stable storage using a
+		 * synchronous transaction we let the log force inside the
+		 * ->sync_fs call do that for us, which reduces the number
+		 * of synchronous log forces dramatically.
+		 */
+		xfs_ioend_wait(ip);
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+		if (ip->i_update_core) {
+			error = xfs_log_inode(ip);
+			if (error)
+				goto out_unlock;
+		}
+	} else {
+		/*
+		 * We make this non-blocking if the inode is contended, return
+		 * EAGAIN to indicate to the caller that they did not succeed.
+		 * This prevents the flush path from blocking on inodes inside
+		 * another operation right now, they get caught later by
+		 * xfs_sync.
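+		 *
+		 * For example, background writeback (sync_mode ==
+		 * WB_SYNC_NONE) simply gets EAGAIN back if the ilock or
+		 * flush lock cannot be taken straight away; the inode is
+		 * marked dirty again below and revisited on a later pass.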
+ */ + if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) + goto out; + + if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) + goto out_unlock; + + /* + * Now we have the flush lock and the inode is not pinned, we + * can check if the inode is really clean as we know that + * there are no pending transaction completions, it is not + * waiting on the delayed write queue and there is no IO in + * progress. + */ + if (xfs_inode_clean(ip)) { + xfs_ifunlock(ip); + error = 0; + goto out_unlock; + } + error = xfs_iflush(ip, SYNC_TRYLOCK); + } + + out_unlock: + xfs_iunlock(ip, XFS_ILOCK_SHARED); + out: + /* + * if we failed to write out the inode then mark + * it dirty again so we'll try again later. + */ + if (error) + xfs_mark_inode_dirty_sync(ip); + return -error; +} + +STATIC void +xfs_fs_evict_inode( + struct inode *inode) +{ + xfs_inode_t *ip = XFS_I(inode); + + trace_xfs_evict_inode(ip); + + truncate_inode_pages(&inode->i_data, 0); + end_writeback(inode); + XFS_STATS_INC(vn_rele); + XFS_STATS_INC(vn_remove); + XFS_STATS_DEC(vn_active); + + /* + * The iolock is used by the file system to coordinate reads, + * writes, and block truncates. Up to this point the lock + * protected concurrent accesses by users of the inode. But + * from here forward we're doing some final processing of the + * inode because we're done with it, and although we reuse the + * iolock for protection it is really a distinct lock class + * (in the lockdep sense) from before. To keep lockdep happy + * (and basically indicate what we are doing), we explicitly + * re-init the iolock here. + */ + ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); + lockdep_set_class_and_name(&ip->i_iolock.mr_lock, + &xfs_iolock_reclaimable, "xfs_iolock_reclaimable"); + + xfs_inactive(ip); +} + +STATIC void +xfs_free_fsname( + struct xfs_mount *mp) +{ + kfree(mp->m_fsname); + kfree(mp->m_rtname); + kfree(mp->m_logname); +} + +STATIC void +xfs_fs_put_super( + struct super_block *sb) +{ + struct xfs_mount *mp = XFS_M(sb); + + xfs_syncd_stop(mp); + + /* + * Blow away any referenced inode in the filestreams cache. + * This can and will cause log traffic as inodes go inactive + * here. + */ + xfs_filestream_unmount(mp); + + XFS_bflush(mp->m_ddev_targp); + + xfs_unmountfs(mp); + xfs_freesb(mp); + xfs_icsb_destroy_counters(mp); + xfs_close_devices(mp); + xfs_free_fsname(mp); + kfree(mp); +} + +STATIC int +xfs_fs_sync_fs( + struct super_block *sb, + int wait) +{ + struct xfs_mount *mp = XFS_M(sb); + int error; + + /* + * Not much we can do for the first async pass. Writing out the + * superblock would be counter-productive as we are going to redirty + * when writing out other data and metadata (and writing out a single + * block is quite fast anyway). + * + * Try to asynchronously kick off quota syncing at least. + */ + if (!wait) { + xfs_qm_sync(mp, SYNC_TRYLOCK); + return 0; + } + + error = xfs_quiesce_data(mp); + if (error) + return -error; + + if (laptop_mode) { + /* + * The disk must be active because we're syncing. + * We schedule xfssyncd now (now that the disk is + * active) instead of later (when it might not be). 
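+		 * In laptop mode the disk spins down aggressively, so
+		 * piggy-backing the delayed sync work on this wakeup
+		 * avoids a second spin-up later.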
+ */ + flush_delayed_work_sync(&mp->m_sync_work); + } + + return 0; +} + +STATIC int +xfs_fs_statfs( + struct dentry *dentry, + struct kstatfs *statp) +{ + struct xfs_mount *mp = XFS_M(dentry->d_sb); + xfs_sb_t *sbp = &mp->m_sb; + struct xfs_inode *ip = XFS_I(dentry->d_inode); + __uint64_t fakeinos, id; + xfs_extlen_t lsize; + __int64_t ffree; + + statp->f_type = XFS_SB_MAGIC; + statp->f_namelen = MAXNAMELEN - 1; + + id = huge_encode_dev(mp->m_ddev_targp->bt_dev); + statp->f_fsid.val[0] = (u32)id; + statp->f_fsid.val[1] = (u32)(id >> 32); + + xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); + + spin_lock(&mp->m_sb_lock); + statp->f_bsize = sbp->sb_blocksize; + lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; + statp->f_blocks = sbp->sb_dblocks - lsize; + statp->f_bfree = statp->f_bavail = + sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); + fakeinos = statp->f_bfree << sbp->sb_inopblog; + statp->f_files = + MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); + if (mp->m_maxicount) + statp->f_files = min_t(typeof(statp->f_files), + statp->f_files, + mp->m_maxicount); + + /* make sure statp->f_ffree does not underflow */ + ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); + statp->f_ffree = max_t(__int64_t, ffree, 0); + + spin_unlock(&mp->m_sb_lock); + + if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) || + ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) == + (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) + xfs_qm_statvfs(ip, statp); + return 0; +} + +STATIC void +xfs_save_resvblks(struct xfs_mount *mp) +{ + __uint64_t resblks = 0; + + mp->m_resblks_save = mp->m_resblks; + xfs_reserve_blocks(mp, &resblks, NULL); +} + +STATIC void +xfs_restore_resvblks(struct xfs_mount *mp) +{ + __uint64_t resblks; + + if (mp->m_resblks_save) { + resblks = mp->m_resblks_save; + mp->m_resblks_save = 0; + } else + resblks = xfs_default_resblks(mp); + + xfs_reserve_blocks(mp, &resblks, NULL); +} + +STATIC int +xfs_fs_remount( + struct super_block *sb, + int *flags, + char *options) +{ + struct xfs_mount *mp = XFS_M(sb); + substring_t args[MAX_OPT_ARGS]; + char *p; + int error; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + + if (!*p) + continue; + + token = match_token(p, tokens, args); + switch (token) { + case Opt_barrier: + mp->m_flags |= XFS_MOUNT_BARRIER; + break; + case Opt_nobarrier: + mp->m_flags &= ~XFS_MOUNT_BARRIER; + break; + default: + /* + * Logically we would return an error here to prevent + * users from believing they might have changed + * mount options using remount which can't be changed. + * + * But unfortunately mount(8) adds all options from + * mtab and fstab to the mount arguments in some cases + * so we can't blindly reject options, but have to + * check for each specified option if it actually + * differs from the currently set option and only + * reject it if that's the case. + * + * Until that is implemented we return success for + * every remount request, and silently ignore all + * options that we can't actually change. + */ +#if 0 + xfs_info(mp, + "mount option \"%s\" not supported for remount\n", p); + return -EINVAL; +#else + break; +#endif + } + } + + /* ro -> rw */ + if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { + mp->m_flags &= ~XFS_MOUNT_RDONLY; + + /* + * If this is the first remount to writeable state we + * might have some superblock changes to update. 
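+		 * Changes noted in m_update_flags while the filesystem was
+		 * read-only (e.g. superblock field fixups made during mount)
+		 * are written back via xfs_mount_log_sb() below.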
+ */ + if (mp->m_update_flags) { + error = xfs_mount_log_sb(mp, mp->m_update_flags); + if (error) { + xfs_warn(mp, "failed to write sb changes"); + return error; + } + mp->m_update_flags = 0; + } + + /* + * Fill out the reserve pool if it is empty. Use the stashed + * value if it is non-zero, otherwise go with the default. + */ + xfs_restore_resvblks(mp); + } + + /* rw -> ro */ + if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { + /* + * After we have synced the data but before we sync the + * metadata, we need to free up the reserve block pool so that + * the used block count in the superblock on disk is correct at + * the end of the remount. Stash the current reserve pool size + * so that if we get remounted rw, we can return it to the same + * size. + */ + + xfs_quiesce_data(mp); + xfs_save_resvblks(mp); + xfs_quiesce_attr(mp); + mp->m_flags |= XFS_MOUNT_RDONLY; + } + + return 0; +} + +/* + * Second stage of a freeze. The data is already frozen so we only + * need to take care of the metadata. Once that's done write a dummy + * record to dirty the log in case of a crash while frozen. + */ +STATIC int +xfs_fs_freeze( + struct super_block *sb) +{ + struct xfs_mount *mp = XFS_M(sb); + + xfs_save_resvblks(mp); + xfs_quiesce_attr(mp); + return -xfs_fs_log_dummy(mp); +} + +STATIC int +xfs_fs_unfreeze( + struct super_block *sb) +{ + struct xfs_mount *mp = XFS_M(sb); + + xfs_restore_resvblks(mp); + return 0; +} + +STATIC int +xfs_fs_show_options( + struct seq_file *m, + struct vfsmount *mnt) +{ + return -xfs_showargs(XFS_M(mnt->mnt_sb), m); +} + +/* + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock _has_ now been read in. + */ +STATIC int +xfs_finish_flags( + struct xfs_mount *mp) +{ + int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); + + /* Fail a mount where the logbuf is smaller than the log stripe */ + if (xfs_sb_version_haslogv2(&mp->m_sb)) { + if (mp->m_logbsize <= 0 && + mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { + mp->m_logbsize = mp->m_sb.sb_logsunit; + } else if (mp->m_logbsize > 0 && + mp->m_logbsize < mp->m_sb.sb_logsunit) { + xfs_warn(mp, + "logbuf size must be greater than or equal to log stripe size"); + return XFS_ERROR(EINVAL); + } + } else { + /* Fail a mount if the logbuf is larger than 32K */ + if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { + xfs_warn(mp, + "logbuf size for version 1 logs must be 16K or 32K"); + return XFS_ERROR(EINVAL); + } + } + + /* + * mkfs'ed attr2 will turn on attr2 mount unless explicitly + * told by noattr2 to turn it off + */ + if (xfs_sb_version_hasattr2(&mp->m_sb) && + !(mp->m_flags & XFS_MOUNT_NOATTR2)) + mp->m_flags |= XFS_MOUNT_ATTR2; + + /* + * prohibit r/w mounts of read-only filesystems + */ + if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { + xfs_warn(mp, + "cannot mount a read-only filesystem as read-write"); + return XFS_ERROR(EROFS); + } + + return 0; +} + +STATIC int +xfs_fs_fill_super( + struct super_block *sb, + void *data, + int silent) +{ + struct inode *root; + struct xfs_mount *mp = NULL; + int flags = 0, error = ENOMEM; + + mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); + if (!mp) + goto out; + + spin_lock_init(&mp->m_sb_lock); + mutex_init(&mp->m_growlock); + atomic_set(&mp->m_active_trans, 0); + + mp->m_super = sb; + sb->s_fs_info = mp; + + error = xfs_parseargs(mp, (char *)data); + if (error) + goto out_free_fsname; + + sb_min_blocksize(sb, BBSIZE); + sb->s_xattr = xfs_xattr_handlers; + sb->s_export_op = &xfs_export_operations; +#ifdef CONFIG_XFS_QUOTA + 
sb->s_qcop = &xfs_quotactl_operations; +#endif + sb->s_op = &xfs_super_operations; + + if (silent) + flags |= XFS_MFSI_QUIET; + + error = xfs_open_devices(mp); + if (error) + goto out_free_fsname; + + error = xfs_icsb_init_counters(mp); + if (error) + goto out_close_devices; + + error = xfs_readsb(mp, flags); + if (error) + goto out_destroy_counters; + + error = xfs_finish_flags(mp); + if (error) + goto out_free_sb; + + error = xfs_setup_devices(mp); + if (error) + goto out_free_sb; + + error = xfs_filestream_mount(mp); + if (error) + goto out_free_sb; + + /* + * we must configure the block size in the superblock before we run the + * full mount process as the mount process can lookup and cache inodes. + * For the same reason we must also initialise the syncd and register + * the inode cache shrinker so that inodes can be reclaimed during + * operations like a quotacheck that iterate all inodes in the + * filesystem. + */ + sb->s_magic = XFS_SB_MAGIC; + sb->s_blocksize = mp->m_sb.sb_blocksize; + sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; + sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); + sb->s_time_gran = 1; + set_posix_acl_flag(sb); + + error = xfs_mountfs(mp); + if (error) + goto out_filestream_unmount; + + error = xfs_syncd_init(mp); + if (error) + goto out_unmount; + + root = igrab(VFS_I(mp->m_rootip)); + if (!root) { + error = ENOENT; + goto out_syncd_stop; + } + if (is_bad_inode(root)) { + error = EINVAL; + goto out_syncd_stop; + } + sb->s_root = d_alloc_root(root); + if (!sb->s_root) { + error = ENOMEM; + goto out_iput; + } + + return 0; + + out_filestream_unmount: + xfs_filestream_unmount(mp); + out_free_sb: + xfs_freesb(mp); + out_destroy_counters: + xfs_icsb_destroy_counters(mp); + out_close_devices: + xfs_close_devices(mp); + out_free_fsname: + xfs_free_fsname(mp); + kfree(mp); + out: + return -error; + + out_iput: + iput(root); + out_syncd_stop: + xfs_syncd_stop(mp); + out_unmount: + /* + * Blow away any referenced inode in the filestreams cache. + * This can and will cause log traffic as inodes go inactive + * here. 
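+	 * This mirrors the teardown in xfs_fs_put_super(): empty the
+	 * filestreams cache and flush the data device before
+	 * xfs_unmountfs() runs.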
+ */ + xfs_filestream_unmount(mp); + + XFS_bflush(mp->m_ddev_targp); + + xfs_unmountfs(mp); + goto out_free_sb; +} + +STATIC struct dentry * +xfs_fs_mount( + struct file_system_type *fs_type, + int flags, + const char *dev_name, + void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); +} + +static int +xfs_fs_nr_cached_objects( + struct super_block *sb) +{ + return xfs_reclaim_inodes_count(XFS_M(sb)); +} + +static void +xfs_fs_free_cached_objects( + struct super_block *sb, + int nr_to_scan) +{ + xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); +} + +static const struct super_operations xfs_super_operations = { + .alloc_inode = xfs_fs_alloc_inode, + .destroy_inode = xfs_fs_destroy_inode, + .dirty_inode = xfs_fs_dirty_inode, + .write_inode = xfs_fs_write_inode, + .evict_inode = xfs_fs_evict_inode, + .put_super = xfs_fs_put_super, + .sync_fs = xfs_fs_sync_fs, + .freeze_fs = xfs_fs_freeze, + .unfreeze_fs = xfs_fs_unfreeze, + .statfs = xfs_fs_statfs, + .remount_fs = xfs_fs_remount, + .show_options = xfs_fs_show_options, + .nr_cached_objects = xfs_fs_nr_cached_objects, + .free_cached_objects = xfs_fs_free_cached_objects, +}; + +static struct file_system_type xfs_fs_type = { + .owner = THIS_MODULE, + .name = "xfs", + .mount = xfs_fs_mount, + .kill_sb = kill_block_super, + .fs_flags = FS_REQUIRES_DEV, +}; + +STATIC int __init +xfs_init_zones(void) +{ + + xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); + if (!xfs_ioend_zone) + goto out; + + xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, + xfs_ioend_zone); + if (!xfs_ioend_pool) + goto out_destroy_ioend_zone; + + xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t), + "xfs_log_ticket"); + if (!xfs_log_ticket_zone) + goto out_destroy_ioend_pool; + + xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t), + "xfs_bmap_free_item"); + if (!xfs_bmap_free_item_zone) + goto out_destroy_log_ticket_zone; + + xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), + "xfs_btree_cur"); + if (!xfs_btree_cur_zone) + goto out_destroy_bmap_free_item_zone; + + xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t), + "xfs_da_state"); + if (!xfs_da_state_zone) + goto out_destroy_btree_cur_zone; + + xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); + if (!xfs_dabuf_zone) + goto out_destroy_da_state_zone; + + xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); + if (!xfs_ifork_zone) + goto out_destroy_dabuf_zone; + + xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans"); + if (!xfs_trans_zone) + goto out_destroy_ifork_zone; + + xfs_log_item_desc_zone = + kmem_zone_init(sizeof(struct xfs_log_item_desc), + "xfs_log_item_desc"); + if (!xfs_log_item_desc_zone) + goto out_destroy_trans_zone; + + /* + * The size of the zone allocated buf log item is the maximum + * size possible under XFS. This wastes a little bit of memory, + * but it is much faster. 
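+	 * Concretely, the dirty bitmap tail sized below covers
+	 * XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK = 64k / 128 = 512 chunk bits,
+	 * i.e. (512 / NBWORD) = 16 extra ints per item.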
+	 */
+	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
+				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
+				  NBWORD) * sizeof(int))), "xfs_buf_item");
+	if (!xfs_buf_item_zone)
+		goto out_destroy_log_item_desc_zone;
+
+	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
+			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
+			 sizeof(xfs_extent_t))), "xfs_efd_item");
+	if (!xfs_efd_zone)
+		goto out_destroy_buf_item_zone;
+
+	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
+			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
+			 sizeof(xfs_extent_t))), "xfs_efi_item");
+	if (!xfs_efi_zone)
+		goto out_destroy_efd_zone;
+
+	xfs_inode_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
+			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
+			xfs_fs_inode_init_once);
+	if (!xfs_inode_zone)
+		goto out_destroy_efi_zone;
+
+	xfs_ili_zone =
+		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
+					KM_ZONE_SPREAD, NULL);
+	if (!xfs_ili_zone)
+		goto out_destroy_inode_zone;
+
+	return 0;
+
+ out_destroy_inode_zone:
+	kmem_zone_destroy(xfs_inode_zone);
+ out_destroy_efi_zone:
+	kmem_zone_destroy(xfs_efi_zone);
+ out_destroy_efd_zone:
+	kmem_zone_destroy(xfs_efd_zone);
+ out_destroy_buf_item_zone:
+	kmem_zone_destroy(xfs_buf_item_zone);
+ out_destroy_log_item_desc_zone:
+	kmem_zone_destroy(xfs_log_item_desc_zone);
+ out_destroy_trans_zone:
+	kmem_zone_destroy(xfs_trans_zone);
+ out_destroy_ifork_zone:
+	kmem_zone_destroy(xfs_ifork_zone);
+ out_destroy_dabuf_zone:
+	kmem_zone_destroy(xfs_dabuf_zone);
+ out_destroy_da_state_zone:
+	kmem_zone_destroy(xfs_da_state_zone);
+ out_destroy_btree_cur_zone:
+	kmem_zone_destroy(xfs_btree_cur_zone);
+ out_destroy_bmap_free_item_zone:
+	kmem_zone_destroy(xfs_bmap_free_item_zone);
+ out_destroy_log_ticket_zone:
+	kmem_zone_destroy(xfs_log_ticket_zone);
+ out_destroy_ioend_pool:
+	mempool_destroy(xfs_ioend_pool);
+ out_destroy_ioend_zone:
+	kmem_zone_destroy(xfs_ioend_zone);
+ out:
+	return -ENOMEM;
+}
+
+STATIC void
+xfs_destroy_zones(void)
+{
+	kmem_zone_destroy(xfs_ili_zone);
+	kmem_zone_destroy(xfs_inode_zone);
+	kmem_zone_destroy(xfs_efi_zone);
+	kmem_zone_destroy(xfs_efd_zone);
+	kmem_zone_destroy(xfs_buf_item_zone);
+	kmem_zone_destroy(xfs_log_item_desc_zone);
+	kmem_zone_destroy(xfs_trans_zone);
+	kmem_zone_destroy(xfs_ifork_zone);
+	kmem_zone_destroy(xfs_dabuf_zone);
+	kmem_zone_destroy(xfs_da_state_zone);
+	kmem_zone_destroy(xfs_btree_cur_zone);
+	kmem_zone_destroy(xfs_bmap_free_item_zone);
+	kmem_zone_destroy(xfs_log_ticket_zone);
+	mempool_destroy(xfs_ioend_pool);
+	kmem_zone_destroy(xfs_ioend_zone);
+}
+
+STATIC int __init
+xfs_init_workqueues(void)
+{
+	/*
+	 * max_active is set to 8 to give enough concurrency to allow
+	 * multiple work operations on each CPU to run. This allows multiple
+	 * filesystems to be running sync work concurrently, and scales with
+	 * the number of CPUs in the system.
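For context on the calls just below: alloc_workqueue() takes (name, flags, max_active), and max_active caps how many work items from that queue may run concurrently on each CPU. A minimal sketch of the same allocation pattern, using a hypothetical queue name rather than anything from this patch:

	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_wq;

	static int __init demo_init(void)
	{
		/* up to 8 concurrent work items per CPU, CPU-intensive class */
		demo_wq = alloc_workqueue("demo", WQ_CPU_INTENSIVE, 8);
		if (!demo_wq)
			return -ENOMEM;
		return 0;
	}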
+ */ + xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); + if (!xfs_syncd_wq) + goto out; + + xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8); + if (!xfs_ail_wq) + goto out_destroy_syncd; + + return 0; + +out_destroy_syncd: + destroy_workqueue(xfs_syncd_wq); +out: + return -ENOMEM; +} + +STATIC void +xfs_destroy_workqueues(void) +{ + destroy_workqueue(xfs_ail_wq); + destroy_workqueue(xfs_syncd_wq); +} + +STATIC int __init +init_xfs_fs(void) +{ + int error; + + printk(KERN_INFO XFS_VERSION_STRING " with " + XFS_BUILD_OPTIONS " enabled\n"); + + xfs_ioend_init(); + xfs_dir_startup(); + + error = xfs_init_zones(); + if (error) + goto out; + + error = xfs_init_workqueues(); + if (error) + goto out_destroy_zones; + + error = xfs_mru_cache_init(); + if (error) + goto out_destroy_wq; + + error = xfs_filestream_init(); + if (error) + goto out_mru_cache_uninit; + + error = xfs_buf_init(); + if (error) + goto out_filestream_uninit; + + error = xfs_init_procfs(); + if (error) + goto out_buf_terminate; + + error = xfs_sysctl_register(); + if (error) + goto out_cleanup_procfs; + + vfs_initquota(); + + error = register_filesystem(&xfs_fs_type); + if (error) + goto out_sysctl_unregister; + return 0; + + out_sysctl_unregister: + xfs_sysctl_unregister(); + out_cleanup_procfs: + xfs_cleanup_procfs(); + out_buf_terminate: + xfs_buf_terminate(); + out_filestream_uninit: + xfs_filestream_uninit(); + out_mru_cache_uninit: + xfs_mru_cache_uninit(); + out_destroy_wq: + xfs_destroy_workqueues(); + out_destroy_zones: + xfs_destroy_zones(); + out: + return error; +} + +STATIC void __exit +exit_xfs_fs(void) +{ + vfs_exitquota(); + unregister_filesystem(&xfs_fs_type); + xfs_sysctl_unregister(); + xfs_cleanup_procfs(); + xfs_buf_terminate(); + xfs_filestream_uninit(); + xfs_mru_cache_uninit(); + xfs_destroy_workqueues(); + xfs_destroy_zones(); +} + +module_init(init_xfs_fs); +module_exit(exit_xfs_fs); + +MODULE_AUTHOR("Silicon Graphics, Inc."); +MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); +MODULE_LICENSE("GPL"); diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h new file mode 100644 index 0000000..50a3266 --- /dev/null +++ b/fs/xfs/xfs_super.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_SUPER_H__ +#define __XFS_SUPER_H__ + +#include + +#ifdef CONFIG_XFS_QUOTA +extern void xfs_qm_init(void); +extern void xfs_qm_exit(void); +# define vfs_initquota() xfs_qm_init() +# define vfs_exitquota() xfs_qm_exit() +#else +# define vfs_initquota() do { } while (0) +# define vfs_exitquota() do { } while (0) +#endif + +#ifdef CONFIG_XFS_POSIX_ACL +# define XFS_ACL_STRING "ACLs, " +# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL) +#else +# define XFS_ACL_STRING +# define set_posix_acl_flag(sb) do { } while (0) +#endif + +#define XFS_SECURITY_STRING "security attributes, " + +#ifdef CONFIG_XFS_RT +# define XFS_REALTIME_STRING "realtime, " +#else +# define XFS_REALTIME_STRING +#endif + +#if XFS_BIG_BLKNOS +# if XFS_BIG_INUMS +# define XFS_BIGFS_STRING "large block/inode numbers, " +# else +# define XFS_BIGFS_STRING "large block numbers, " +# endif +#else +# define XFS_BIGFS_STRING +#endif + +#ifdef DEBUG +# define XFS_DBG_STRING "debug" +#else +# define XFS_DBG_STRING "no debug" +#endif + +#define XFS_VERSION_STRING "SGI XFS" +#define XFS_BUILD_OPTIONS XFS_ACL_STRING \ + XFS_SECURITY_STRING \ + XFS_REALTIME_STRING \ + XFS_BIGFS_STRING \ + XFS_DBG_STRING /* DBG must be last */ + +struct xfs_inode; +struct xfs_mount; +struct xfs_buftarg; +struct block_device; + +extern __uint64_t xfs_max_file_offset(unsigned int); + +extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); + +extern const struct export_operations xfs_export_operations; +extern const struct xattr_handler *xfs_xattr_handlers[]; +extern const struct quotactl_ops xfs_quotactl_operations; + +#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) + +#endif /* __XFS_SUPER_H__ */ diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c new file mode 100644 index 0000000..4604f90 --- /dev/null +++ b/fs/xfs/xfs_sync.c @@ -0,0 +1,1065 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_types.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_trans_priv.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_dinode.h" +#include "xfs_error.h" +#include "xfs_filestream.h" +#include "xfs_vnodeops.h" +#include "xfs_inode_item.h" +#include "xfs_quota.h" +#include "xfs_trace.h" +#include "xfs_fsops.h" + +#include +#include + +struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ + +/* + * The inode lookup is done in batches to keep the amount of lock traffic and + * radix tree lookups to a minimum. 
The batch size is a trade-off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
+#define XFS_LOOKUP_BATCH 32
+
+STATIC int
+xfs_inode_ag_walk_grab(
+	struct xfs_inode	*ip)
+{
+	struct inode		*inode = VFS_I(ip);
+
+	ASSERT(rcu_read_lock_held());
+
+	/*
+	 * check for stale RCU freed inode
+	 *
+	 * If the inode has been reallocated, it doesn't matter if it's not in
+	 * the AG we are walking - we are walking for writeback, so if it
+	 * passes all the "valid inode" checks and is dirty, then we'll write
+	 * it back anyway. If it has been reallocated and is still being
+	 * initialised, the XFS_INEW check below will catch it.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	if (!ip->i_ino)
+		goto out_unlock_noent;
+
+	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+		goto out_unlock_noent;
+	spin_unlock(&ip->i_flags_lock);
+
+	/* nothing to sync during shutdown */
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return EFSCORRUPTED;
+
+	/* If we can't grab the inode, it must be on its way to reclaim. */
+	if (!igrab(inode))
+		return ENOENT;
+
+	if (is_bad_inode(inode)) {
+		IRELE(ip);
+		return ENOENT;
+	}
+
+	/* inode is valid */
+	return 0;
+
+out_unlock_noent:
+	spin_unlock(&ip->i_flags_lock);
+	return ENOENT;
+}
+
+STATIC int
+xfs_inode_ag_walk(
+	struct xfs_mount	*mp,
+	struct xfs_perag	*pag,
+	int			(*execute)(struct xfs_inode *ip,
+					   struct xfs_perag *pag, int flags),
+	int			flags)
+{
+	uint32_t		first_index;
+	int			last_error = 0;
+	int			skipped;
+	int			done;
+	int			nr_found;
+
+restart:
+	done = 0;
+	skipped = 0;
+	first_index = 0;
+	nr_found = 0;
+	do {
+		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+		int		error = 0;
+		int		i;
+
+		rcu_read_lock();
+		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
+					(void **)batch, first_index,
+					XFS_LOOKUP_BATCH);
+		if (!nr_found) {
+			rcu_read_unlock();
+			break;
+		}
+
+		/*
+		 * Grab the inodes before we drop the lock. If we found
+		 * nothing, nr == 0 and the loop will be skipped.
+		 */
+		for (i = 0; i < nr_found; i++) {
+			struct xfs_inode *ip = batch[i];
+
+			if (done || xfs_inode_ag_walk_grab(ip))
+				batch[i] = NULL;
+
+			/*
+			 * Update the index for the next lookup. Catch
+			 * overflows into the next AG range which can occur if
+			 * we have inodes in the last block of the AG and we
+			 * are currently pointing to the last inode.
+			 *
+			 * Because we may see inodes that are from the wrong AG
+			 * due to RCU freeing and reallocation, only update the
+			 * index if it lies in this AG. It was a race that led
+			 * us to see this inode, so another lookup from the
+			 * same index will not find it again.
+			 */
+			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+				continue;
+			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+				done = 1;
+		}
+
+		/* unlock now we've grabbed the inodes. */
+		rcu_read_unlock();
+
+		for (i = 0; i < nr_found; i++) {
+			if (!batch[i])
+				continue;
+			error = execute(batch[i], pag, flags);
+			IRELE(batch[i]);
+			if (error == EAGAIN) {
+				skipped++;
+				continue;
+			}
+			if (error && last_error != EFSCORRUPTED)
+				last_error = error;
+		}
+
+		/* bail out if the filesystem is corrupted.
*/ + if (error == EFSCORRUPTED) + break; + + cond_resched(); + + } while (nr_found && !done); + + if (skipped) { + delay(1); + goto restart; + } + return last_error; +} + +int +xfs_inode_ag_iterator( + struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, + struct xfs_perag *pag, int flags), + int flags) +{ + struct xfs_perag *pag; + int error = 0; + int last_error = 0; + xfs_agnumber_t ag; + + ag = 0; + while ((pag = xfs_perag_get(mp, ag))) { + ag = pag->pag_agno + 1; + error = xfs_inode_ag_walk(mp, pag, execute, flags); + xfs_perag_put(pag); + if (error) { + last_error = error; + if (error == EFSCORRUPTED) + break; + } + } + return XFS_ERROR(last_error); +} + +STATIC int +xfs_sync_inode_data( + struct xfs_inode *ip, + struct xfs_perag *pag, + int flags) +{ + struct inode *inode = VFS_I(ip); + struct address_space *mapping = inode->i_mapping; + int error = 0; + + if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) + goto out_wait; + + if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { + if (flags & SYNC_TRYLOCK) + goto out_wait; + xfs_ilock(ip, XFS_IOLOCK_SHARED); + } + + error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ? + 0 : XBF_ASYNC, FI_NONE); + xfs_iunlock(ip, XFS_IOLOCK_SHARED); + + out_wait: + if (flags & SYNC_WAIT) + xfs_ioend_wait(ip); + return error; +} + +STATIC int +xfs_sync_inode_attr( + struct xfs_inode *ip, + struct xfs_perag *pag, + int flags) +{ + int error = 0; + + xfs_ilock(ip, XFS_ILOCK_SHARED); + if (xfs_inode_clean(ip)) + goto out_unlock; + if (!xfs_iflock_nowait(ip)) { + if (!(flags & SYNC_WAIT)) + goto out_unlock; + xfs_iflock(ip); + } + + if (xfs_inode_clean(ip)) { + xfs_ifunlock(ip); + goto out_unlock; + } + + error = xfs_iflush(ip, flags); + + /* + * We don't want to try again on non-blocking flushes that can't run + * again immediately. If an inode really must be written, then that's + * what the SYNC_WAIT flag is for. + */ + if (error == EAGAIN) { + ASSERT(!(flags & SYNC_WAIT)); + error = 0; + } + + out_unlock: + xfs_iunlock(ip, XFS_ILOCK_SHARED); + return error; +} + +/* + * Write out pagecache data for the whole filesystem. + */ +STATIC int +xfs_sync_data( + struct xfs_mount *mp, + int flags) +{ + int error; + + ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0); + + error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags); + if (error) + return XFS_ERROR(error); + + xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0); + return 0; +} + +/* + * Write out inode metadata (attributes) for the whole filesystem. + */ +STATIC int +xfs_sync_attr( + struct xfs_mount *mp, + int flags) +{ + ASSERT((flags & ~SYNC_WAIT) == 0); + + return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags); +} + +STATIC int +xfs_sync_fsdata( + struct xfs_mount *mp) +{ + struct xfs_buf *bp; + + /* + * If the buffer is pinned then push on the log so we won't get stuck + * waiting in the write for someone, maybe ourselves, to flush the log. + * + * Even though we just pushed the log above, we did not have the + * superblock buffer locked at that point so it can become pinned in + * between there and here. + */ + bp = xfs_getsb(mp, 0); + if (xfs_buf_ispinned(bp)) + xfs_log_force(mp, 0); + + return xfs_bwrite(mp, bp); +} + +/* + * When remounting a filesystem read-only or freezing the filesystem, we have + * two phases to execute. This first phase is syncing the data before we + * quiesce the filesystem, and the second is flushing all the inodes out after + * we've waited for all the transactions created by the first phase to + * complete. 
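xfs_inode_ag_iterator() above is the single entry point the sync code uses for whole-filesystem inode walks: callers pass an execute() callback and the iterator takes care of per-AG batching, RCU-safe lookup and reference counting. A hedged sketch of a caller, where demo_count_dirty() and demo_dirty_count are hypothetical and not part of this patch:

	static atomic_t demo_dirty_count = ATOMIC_INIT(0);

	/* called once for each inode the walk manages to grab */
	STATIC int
	demo_count_dirty(
		struct xfs_inode	*ip,
		struct xfs_perag	*pag,
		int			flags)
	{
		if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
			atomic_inc(&demo_dirty_count);
		return 0;	/* non-zero becomes the iterator's last_error */
	}

	/* walk every cached inode in every AG */
	error = xfs_inode_ag_iterator(mp, demo_count_dirty, 0);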
+ * The second phase ensures that the inodes are written to their
+ * location on disk rather than just existing in transactions in the log. This
+ * means after a quiesce there is no log replay required to write the inodes to
+ * disk (this is the main difference between a sync and a quiesce).
+ */
+/*
+ * First stage of freeze - no writers will make progress now we are here,
+ * so we flush delwri and delalloc buffers here, then wait for all I/O to
+ * complete.  Data is frozen at that point. Metadata is not frozen,
+ * transactions can still occur here so don't bother flushing the buftarg
+ * because it'll just get dirty again.
+ */
+int
+xfs_quiesce_data(
+	struct xfs_mount	*mp)
+{
+	int			error, error2 = 0;
+
+	xfs_qm_sync(mp, SYNC_TRYLOCK);
+	xfs_qm_sync(mp, SYNC_WAIT);
+
+	/* force out the newly dirtied log buffers */
+	xfs_log_force(mp, XFS_LOG_SYNC);
+
+	/* write superblock and hoover up shutdown errors */
+	error = xfs_sync_fsdata(mp);
+
+	/* make sure all delwri buffers are written out */
+	xfs_flush_buftarg(mp->m_ddev_targp, 1);
+
+	/* mark the log as covered if needed */
+	if (xfs_log_need_covered(mp))
+		error2 = xfs_fs_log_dummy(mp);
+
+	/* flush data-only devices */
+	if (mp->m_rtdev_targp)
+		XFS_bflush(mp->m_rtdev_targp);
+
+	return error ? error : error2;
+}
+
+STATIC void
+xfs_quiesce_fs(
+	struct xfs_mount	*mp)
+{
+	int	count = 0, pincount;
+
+	xfs_reclaim_inodes(mp, 0);
+	xfs_flush_buftarg(mp->m_ddev_targp, 0);
+
+	/*
+	 * This loop must run at least twice. The first instance of the loop
+	 * will flush most meta data but that will generate more meta data
+	 * (typically directory updates), which then must be flushed and
+	 * logged before we can write the unmount record. We also do sync
+	 * reclaim of inodes to catch any that the above delwri flush skipped.
+	 */
+	do {
+		xfs_reclaim_inodes(mp, SYNC_WAIT);
+		xfs_sync_attr(mp, SYNC_WAIT);
+		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
+		if (!pincount) {
+			delay(50);
+			count++;
+		}
+	} while (count < 2);
+}
+
+/*
+ * Second stage of a quiesce. The data is already synced, now we have to take
+ * care of the metadata. New transactions are already blocked, so we need to
+ * wait for any remaining transactions to drain out before proceeding.
+ */
+void
+xfs_quiesce_attr(
+	struct xfs_mount	*mp)
+{
+	int	error = 0;
+
+	/* wait for all modifications to complete */
+	while (atomic_read(&mp->m_active_trans) > 0)
+		delay(100);
+
+	/* flush inodes and push all remaining buffers out to disk */
+	xfs_quiesce_fs(mp);
+
+	/*
+	 * Just warn here till VFS can correctly support
+	 * read-only remount without racing.
+	 */
+	WARN_ON(atomic_read(&mp->m_active_trans) != 0);
+
+	/* Push the superblock and write an unmount record */
+	error = xfs_log_sbcount(mp);
+	if (error)
+		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
+				"Frozen image may not be consistent.");
+	xfs_log_unmount_write(mp);
+	xfs_unmountfs_writesb(mp);
+}
+
+static void
+xfs_syncd_queue_sync(
+	struct xfs_mount	*mp)
+{
+	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
+				msecs_to_jiffies(xfs_syncd_centisecs * 10));
+}
+
+/*
+ * Every sync period we need to unpin all items, reclaim inodes and sync
+ * disk quotas.  We might need to cover the log to indicate that the
+ * filesystem is idle and not frozen.
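To put numbers on the scheduling here: xfs_syncd_centisecs defaults to 3000 centiseconds (it is exposed via the xfssyncd_centisecs sysctl later in this series), so xfs_syncd_queue_sync() above requeues the sync worker every 3000 * 10 ms = 30 s, while the reclaim worker below is requeued every 3000 / 6 * 10 ms = 5 s, matching the "every 5s based on the xfs syncd work default of 30s" comment that follows.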
+ */
+STATIC void
+xfs_sync_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(to_delayed_work(work),
+					struct xfs_mount, m_sync_work);
+	int		error;
+
+	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+		/* dgc: errors ignored here */
+		if (mp->m_super->s_frozen == SB_UNFROZEN &&
+		    xfs_log_need_covered(mp))
+			error = xfs_fs_log_dummy(mp);
+		else
+			xfs_log_force(mp, 0);
+		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+		/* start pushing all the metadata that is currently dirty */
+		xfs_ail_push_all(mp->m_ail);
+	}
+
+	/* queue us up again */
+	xfs_syncd_queue_sync(mp);
+}
+
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs syncd work default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or
+ * too aggressive.
+ */
+static void
+xfs_syncd_queue_reclaim(
+	struct xfs_mount	*mp)
+{
+	/*
+	 * We can have inodes enter reclaim after we've shut down the syncd
+	 * workqueue during unmount, so don't allow reclaim work to be queued
+	 * during unmount.
+	 */
+	if (!(mp->m_super->s_flags & MS_ACTIVE))
+		return;
+
+	rcu_read_lock();
+	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+STATIC void
+xfs_reclaim_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(to_delayed_work(work),
+					struct xfs_mount, m_reclaim_work);
+
+	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+	xfs_syncd_queue_reclaim(mp);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations.  At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room.
+ *
+ * Queue a new data flush if there isn't one already in progress and
+ * wait for completion of the flush. This means that we only ever have one
+ * inode flush in progress no matter how many ENOSPC events are occurring and
+ * so will prevent the system from bogging down due to every concurrent
+ * ENOSPC event scanning all the active inodes in the system for writeback.
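The queue_work()/flush_work_sync() pair that implements this, just below, is the standard "one flusher, every caller waits" construction: queue_work() is a no-op while the work item is still pending, and flush_work_sync() blocks until the queued or running instance completes. A standalone sketch of the pattern, with demo_* names that are purely illustrative:

	static struct work_struct demo_flush_work;	/* INIT_WORK()ed at setup */

	void demo_wait_for_flush(void)
	{
		/* later callers simply ride the already-pending work item */
		queue_work(system_wq, &demo_flush_work);
		flush_work_sync(&demo_flush_work);
	}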
+ */
+void
+xfs_flush_inodes(
+	struct xfs_inode	*ip)
+{
+	struct xfs_mount	*mp = ip->i_mount;
+
+	queue_work(xfs_syncd_wq, &mp->m_flush_work);
+	flush_work_sync(&mp->m_flush_work);
+}
+
+STATIC void
+xfs_flush_worker(
+	struct work_struct *work)
+{
+	struct xfs_mount *mp = container_of(work,
+					struct xfs_mount, m_flush_work);
+
+	xfs_sync_data(mp, SYNC_TRYLOCK);
+	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
+}
+
+int
+xfs_syncd_init(
+	struct xfs_mount	*mp)
+{
+	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
+	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
+	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
+
+	xfs_syncd_queue_sync(mp);
+	xfs_syncd_queue_reclaim(mp);
+
+	return 0;
+}
+
+void
+xfs_syncd_stop(
+	struct xfs_mount	*mp)
+{
+	cancel_delayed_work_sync(&mp->m_sync_work);
+	cancel_delayed_work_sync(&mp->m_reclaim_work);
+	cancel_work_sync(&mp->m_flush_work);
+}
+
+void
+__xfs_inode_set_reclaim_tag(
+	struct xfs_perag	*pag,
+	struct xfs_inode	*ip)
+{
+	radix_tree_tag_set(&pag->pag_ici_root,
+			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
+			   XFS_ICI_RECLAIM_TAG);
+
+	if (!pag->pag_ici_reclaimable) {
+		/* propagate the reclaim tag up into the perag radix tree */
+		spin_lock(&ip->i_mount->m_perag_lock);
+		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
+				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+				XFS_ICI_RECLAIM_TAG);
+		spin_unlock(&ip->i_mount->m_perag_lock);
+
+		/* schedule periodic background inode reclaim */
+		xfs_syncd_queue_reclaim(ip->i_mount);
+
+		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
+							-1, _RET_IP_);
+	}
+	pag->pag_ici_reclaimable++;
+}
+
+/*
+ * We set the inode flag atomically with the radix tree tag.
+ * Once we get tag lookups on the radix tree, this inode flag
+ * can go away.
+ */
+void
+xfs_inode_set_reclaim_tag(
+	xfs_inode_t	*ip)
+{
+	struct xfs_mount *mp = ip->i_mount;
+	struct xfs_perag *pag;
+
+	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+	spin_lock(&pag->pag_ici_lock);
+	spin_lock(&ip->i_flags_lock);
+	__xfs_inode_set_reclaim_tag(pag, ip);
+	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+	spin_unlock(&ip->i_flags_lock);
+	spin_unlock(&pag->pag_ici_lock);
+	xfs_perag_put(pag);
+}
+
+STATIC void
+__xfs_inode_clear_reclaim(
+	xfs_perag_t	*pag,
+	xfs_inode_t	*ip)
+{
+	pag->pag_ici_reclaimable--;
+	if (!pag->pag_ici_reclaimable) {
+		/* clear the reclaim tag from the perag radix tree */
+		spin_lock(&ip->i_mount->m_perag_lock);
+		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
+				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
+				XFS_ICI_RECLAIM_TAG);
+		spin_unlock(&ip->i_mount->m_perag_lock);
+		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
+							-1, _RET_IP_);
+	}
+}
+
+void
+__xfs_inode_clear_reclaim_tag(
+	xfs_mount_t	*mp,
+	xfs_perag_t	*pag,
+	xfs_inode_t	*ip)
+{
+	radix_tree_tag_clear(&pag->pag_ici_root,
+			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
+	__xfs_inode_clear_reclaim(pag, ip);
+}
+
+/*
+ * Grab the inode for reclaim exclusively.
+ * Return 0 if we grabbed it, non-zero otherwise.
+ */
+STATIC int
+xfs_reclaim_inode_grab(
+	struct xfs_inode	*ip,
+	int			flags)
+{
+	ASSERT(rcu_read_lock_held());
+
+	/* quick check for stale RCU freed inode */
+	if (!ip->i_ino)
+		return 1;
+
+	/*
+	 * Do some unlocked checks first to avoid unnecessary lock traffic.
+	 * The first is a flush lock check, the second is an already-in-reclaim
+	 * check. Only do these checks if we are not going to block on locks.
+	 */
+	if ((flags & SYNC_TRYLOCK) &&
+	    (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
+		return 1;
+	}
+
+	/*
+	 * The radix tree lock here protects a thread in xfs_iget from racing
+	 * with us starting reclaim on the inode.  Once we have the
+	 * XFS_IRECLAIM flag set it will not touch us.
+	 *
+	 * Due to RCU lookup, we may find inodes that have been freed and only
+	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
+	 * aren't candidates for reclaim at all, so we must check that
+	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
+	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
+		/* not a reclaim candidate. */
+		spin_unlock(&ip->i_flags_lock);
+		return 1;
+	}
+	__xfs_iflags_set(ip, XFS_IRECLAIM);
+	spin_unlock(&ip->i_flags_lock);
+	return 0;
+}
+
+/*
+ * Inodes in different states need to be treated differently, and the return
+ * value of xfs_iflush() is not sufficient to get this right. The following
+ * table lists the inode states and the reclaim actions necessary for
+ * non-blocking reclaim:
+ *
+ *	inode state		iflush ret	required action
+ *	---------------		----------	---------------
+ *	bad			-		reclaim
+ *	shutdown		EIO		unpin and reclaim
+ *	clean, unpinned		0		reclaim
+ *	stale, unpinned		0		reclaim
+ *	clean, pinned(*)	0		requeue
+ *	stale, pinned		EAGAIN		requeue
+ *	dirty, delwri ok	0		requeue
+ *	dirty, delwri blocked	EAGAIN		requeue
+ *	dirty, sync flush	0		reclaim
+ *
+ * (*) dgc: I don't think the clean, pinned state is possible but it gets
+ * handled anyway given the order of checks implemented.
+ *
+ * As can be seen from the table, the return value of xfs_iflush() is not
+ * sufficient to correctly decide the reclaim action here. The checks in
+ * xfs_iflush() might look like duplicates, but they are not.
+ *
+ * Also, because we get the flush lock first, we know that any inode that has
+ * been flushed delwri has had the flush completed by the time we check that
+ * the inode is clean. The clean inode check needs to be done before flushing
+ * the inode delwri otherwise we would loop forever requeuing clean inodes as
+ * we cannot tell apart a successful delwri flush and a clean inode from the
+ * return value of xfs_iflush().
+ *
+ * Note that because the inode is flushed delayed write by background
+ * writeback, the flush lock may already be held here and waiting on it can
+ * result in very long latencies. Hence for sync reclaims, where we wait on the
+ * flush lock, the caller should push out delayed write inodes first before
+ * trying to reclaim them to minimise the amount of time spent waiting. For
+ * background reclaim, we just requeue the inode for the next pass.
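Read as code, the table above collapses to a short decision chain. The sketch below uses an illustrative demo_inode with boolean state flags; the real checks, in the function that follows, operate on the live inode and the SYNC_WAIT mode bit:

	#include <stdbool.h>

	struct demo_inode { bool bad, shutdown, pinned, stale, clean, sync_wait; };

	enum demo_action { RECLAIM, UNPIN_AND_RECLAIM, REQUEUE, FLUSH_AND_REQUEUE };

	static enum demo_action demo_disposition(const struct demo_inode *ip)
	{
		if (ip->bad)
			return RECLAIM;
		if (ip->shutdown)
			return UNPIN_AND_RECLAIM;
		if (ip->pinned && !ip->sync_wait)
			return REQUEUE;		/* SYNC_WAIT callers unpin and carry on */
		if (ip->stale || ip->clean)
			return RECLAIM;
		return FLUSH_AND_REQUEUE;	/* flush, wait and reclaim under SYNC_WAIT */
	}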
+ *
+ * Hence the order of actions after gaining the locks should be:
+ *	bad		=> reclaim
+ *	shutdown	=> unpin and reclaim
+ *	pinned, delwri	=> requeue
+ *	pinned, sync	=> unpin
+ *	stale		=> reclaim
+ *	clean		=> reclaim
+ *	dirty, delwri	=> flush and requeue
+ *	dirty, sync	=> flush, wait and reclaim
+ */
+STATIC int
+xfs_reclaim_inode(
+	struct xfs_inode	*ip,
+	struct xfs_perag	*pag,
+	int			sync_mode)
+{
+	int	error;
+
+restart:
+	error = 0;
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	if (!xfs_iflock_nowait(ip)) {
+		if (!(sync_mode & SYNC_WAIT))
+			goto out;
+		xfs_iflock(ip);
+	}
+
+	if (is_bad_inode(VFS_I(ip)))
+		goto reclaim;
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+		xfs_iunpin_wait(ip);
+		goto reclaim;
+	}
+	if (xfs_ipincount(ip)) {
+		if (!(sync_mode & SYNC_WAIT)) {
+			xfs_ifunlock(ip);
+			goto out;
+		}
+		xfs_iunpin_wait(ip);
+	}
+	if (xfs_iflags_test(ip, XFS_ISTALE))
+		goto reclaim;
+	if (xfs_inode_clean(ip))
+		goto reclaim;
+
+	/*
+	 * Now we have an inode that needs flushing.
+	 *
+	 * We do a nonblocking flush here even if we are doing a SYNC_WAIT
+	 * reclaim as we can deadlock with inode cluster removal.
+	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
+	 * ip->i_lock, and we are doing the exact opposite here. As a result,
+	 * doing a blocking xfs_itobp() to get the cluster buffer will result
+	 * in an ABBA deadlock with xfs_ifree_cluster().
+	 *
+	 * As xfs_ifree_cluster() must gather all inodes that are active in the
+	 * cache to mark them stale, if we hit this case we don't actually want
+	 * to do IO here - we want the inode marked stale so we can simply
+	 * reclaim it. Hence if we get an EAGAIN error on a SYNC_WAIT flush,
+	 * just unlock the inode, back off and try again.  Hopefully the next
+	 * pass through will see the stale flag set on the inode.
+	 */
+	error = xfs_iflush(ip, SYNC_TRYLOCK | sync_mode);
+	if (sync_mode & SYNC_WAIT) {
+		if (error == EAGAIN) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			/* backoff longer than in xfs_ifree_cluster */
+			delay(2);
+			goto restart;
+		}
+		xfs_iflock(ip);
+		goto reclaim;
+	}
+
+	/*
+	 * When we have to flush an inode but don't have SYNC_WAIT set, we
+	 * flush the inode out using a delwri buffer and wait for the next
+	 * call into reclaim to find it in a clean state instead of waiting for
+	 * it now. We also don't return errors here - if the error is transient
+	 * then the next reclaim pass will flush the inode, and if the error
+	 * is permanent then the next sync reclaim will reclaim the inode and
+	 * pass on the error.
+	 */
+	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+		xfs_warn(ip->i_mount,
+			"inode 0x%llx background reclaim flush failed with %d",
+			(long long)ip->i_ino, error);
+	}
+out:
+	xfs_iflags_clear(ip, XFS_IRECLAIM);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	/*
+	 * We could return EAGAIN here to make reclaim rescan the inode tree in
+	 * a short while. However, this just burns CPU time scanning the tree
+	 * waiting for IO to complete and xfssyncd never goes back to the idle
+	 * state. Instead, return 0 to let the next scheduled background reclaim
+	 * attempt to reclaim the inode again.
+	 */
+	return 0;
+
+reclaim:
+	xfs_ifunlock(ip);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	XFS_STATS_INC(xs_ig_reclaims);
+	/*
+	 * Remove the inode from the per-AG radix tree.
+	 *
+	 * Because radix_tree_delete won't complain even if the item was never
+	 * added to the tree, assert that it's been there before to catch
+	 * problems with the inode lifetime early on.
+	 */
+	spin_lock(&pag->pag_ici_lock);
+	if (!radix_tree_delete(&pag->pag_ici_root,
+				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
+		ASSERT(0);
+	__xfs_inode_clear_reclaim(pag, ip);
+	spin_unlock(&pag->pag_ici_lock);
+
+	/*
+	 * Here we do an (almost) spurious inode lock in order to coordinate
+	 * with inode cache radix tree lookups. This is because the lookup
+	 * can reference the inodes in the cache without taking references.
+	 *
+	 * We make that OK here by ensuring that we wait until the inode is
+	 * unlocked after the lookup before we go ahead and free it.  We get
+	 * both the ilock and the iolock because the code may need to drop
+	 * the ilock but will still hold the iolock.
+	 */
+	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+	xfs_qm_dqdetach(ip);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+	xfs_inode_free(ip);
+	return error;
+}
+
+/*
+ * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
+ * corrupted, we still want to try to reclaim all the inodes. If we don't,
+ * then a shutdown during a filesystem unmount reclaim walk will leak all the
+ * unreclaimed inodes.
+ */
+int
+xfs_reclaim_inodes_ag(
+	struct xfs_mount	*mp,
+	int			flags,
+	int			*nr_to_scan)
+{
+	struct xfs_perag	*pag;
+	int			error = 0;
+	int			last_error = 0;
+	xfs_agnumber_t		ag;
+	int			trylock = flags & SYNC_TRYLOCK;
+	int			skipped;
+
+restart:
+	ag = 0;
+	skipped = 0;
+	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+		unsigned long	first_index = 0;
+		int		done = 0;
+		int		nr_found = 0;
+
+		ag = pag->pag_agno + 1;
+
+		if (trylock) {
+			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
+				skipped++;
+				xfs_perag_put(pag);
+				continue;
+			}
+			first_index = pag->pag_ici_reclaim_cursor;
+		} else
+			mutex_lock(&pag->pag_ici_reclaim_lock);
+
+		do {
+			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
+			int	i;
+
+			rcu_read_lock();
+			nr_found = radix_tree_gang_lookup_tag(
+					&pag->pag_ici_root,
+					(void **)batch, first_index,
+					XFS_LOOKUP_BATCH,
+					XFS_ICI_RECLAIM_TAG);
+			if (!nr_found) {
+				done = 1;
+				rcu_read_unlock();
+				break;
+			}
+
+			/*
+			 * Grab the inodes before we drop the lock. If we found
+			 * nothing, nr == 0 and the loop will be skipped.
+			 */
+			for (i = 0; i < nr_found; i++) {
+				struct xfs_inode *ip = batch[i];
+
+				if (done || xfs_reclaim_inode_grab(ip, flags))
+					batch[i] = NULL;
+
+				/*
+				 * Update the index for the next lookup. Catch
+				 * overflows into the next AG range which can
+				 * occur if we have inodes in the last block of
+				 * the AG and we are currently pointing to the
+				 * last inode.
+				 *
+				 * Because we may see inodes that are from the
+				 * wrong AG due to RCU freeing and
+				 * reallocation, only update the index if it
+				 * lies in this AG. It was a race that led us
+				 * to see this inode, so another lookup from
+				 * the same index will not find it again.
+				 */
+				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
+								pag->pag_agno)
+					continue;
+				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
+				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
+					done = 1;
+			}
+
+			/* unlock now we've grabbed the inodes. */
+			rcu_read_unlock();
+
+			for (i = 0; i < nr_found; i++) {
+				if (!batch[i])
+					continue;
+				error = xfs_reclaim_inode(batch[i], pag, flags);
+				if (error && last_error != EFSCORRUPTED)
+					last_error = error;
+			}
+
+			*nr_to_scan -= XFS_LOOKUP_BATCH;
+
+			cond_resched();
+
+		} while (nr_found && !done && *nr_to_scan > 0);
+
+		if (trylock && !done)
+			pag->pag_ici_reclaim_cursor = first_index;
+		else
+			pag->pag_ici_reclaim_cursor = 0;
+		mutex_unlock(&pag->pag_ici_reclaim_lock);
+		xfs_perag_put(pag);
+	}
+
+	/*
+	 * If we skipped any AG, and we still have scan count remaining, do
+	 * another pass this time using blocking reclaim semantics (i.e.
+	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
+	 * ensures that when we get more reclaimers than AGs we block rather
+	 * than spin trying to execute reclaim.
+	 */
+	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
+		trylock = 0;
+		goto restart;
+	}
+	return XFS_ERROR(last_error);
+}
+
+int
+xfs_reclaim_inodes(
+	xfs_mount_t	*mp,
+	int		mode)
+{
+	int		nr_to_scan = INT_MAX;
+
+	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
+}
+
+/*
+ * Scan a certain number of inodes for reclaim.
+ *
+ * When called we make sure that there is a background (fast) inode reclaim in
+ * progress, while we throttle the speed of reclaim by doing synchronous
+ * reclaim of inodes. That means if we come across dirty inodes, we wait for
+ * them to be cleaned, which we hope will not be very long due to the
+ * background walker having already kicked the IO off on those dirty inodes.
+ */
+void
+xfs_reclaim_inodes_nr(
+	struct xfs_mount	*mp,
+	int			nr_to_scan)
+{
+	/* kick background reclaimer and push the AIL */
+	xfs_syncd_queue_reclaim(mp);
+	xfs_ail_push_all(mp->m_ail);
+
+	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
+}
+
+/*
+ * Return the number of reclaimable inodes in the filesystem for
+ * the shrinker to determine how much to reclaim.
+ */
+int
+xfs_reclaim_inodes_count(
+	struct xfs_mount	*mp)
+{
+	struct xfs_perag	*pag;
+	xfs_agnumber_t		ag = 0;
+	int			reclaimable = 0;
+
+	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+		ag = pag->pag_agno + 1;
+		reclaimable += pag->pag_ici_reclaimable;
+		xfs_perag_put(pag);
+	}
+	return reclaimable;
+}
+
diff --git a/fs/xfs/xfs_sync.h b/fs/xfs/xfs_sync.h
new file mode 100644
index 0000000..941202e
--- /dev/null
+++ b/fs/xfs/xfs_sync.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef XFS_SYNC_H +#define XFS_SYNC_H 1 + +struct xfs_mount; +struct xfs_perag; + +#define SYNC_WAIT 0x0001 /* wait for i/o to complete */ +#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ + +extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ + +int xfs_syncd_init(struct xfs_mount *mp); +void xfs_syncd_stop(struct xfs_mount *mp); + +int xfs_quiesce_data(struct xfs_mount *mp); +void xfs_quiesce_attr(struct xfs_mount *mp); + +void xfs_flush_inodes(struct xfs_inode *ip); + +int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); +int xfs_reclaim_inodes_count(struct xfs_mount *mp); +void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); + +void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); +void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); +void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, + struct xfs_inode *ip); + +int xfs_sync_inode_grab(struct xfs_inode *ip); +int xfs_inode_ag_iterator(struct xfs_mount *mp, + int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), + int flags); + +#endif diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c new file mode 100644 index 0000000..ee2d2ad --- /dev/null +++ b/fs/xfs/xfs_sysctl.c @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2001-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include +#include +#include "xfs_error.h" + +static struct ctl_table_header *xfs_table_header; + +#ifdef CONFIG_PROC_FS +STATIC int +xfs_stats_clear_proc_handler( + ctl_table *ctl, + int write, + void __user *buffer, + size_t *lenp, + loff_t *ppos) +{ + int c, ret, *valp = ctl->data; + __uint32_t vn_active; + + ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); + + if (!ret && write && *valp) { + xfs_notice(NULL, "Clearing xfsstats"); + for_each_possible_cpu(c) { + preempt_disable(); + /* save vn_active, it's a universal truth! 
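The handler being defined here clears every CPU's statistics while preserving the vn_active count. The general per-CPU reset pattern it relies on, sketched with a hypothetical demo_stats structure rather than the real xfsstats:

	#include <linux/percpu.h>
	#include <linux/string.h>

	struct demo_stats { unsigned int keep_me; unsigned long counter[8]; };
	static DEFINE_PER_CPU(struct demo_stats, demo_stats);

	static void demo_clear_stats(void)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			unsigned int keep;

			preempt_disable();
			keep = per_cpu(demo_stats, cpu).keep_me;
			memset(&per_cpu(demo_stats, cpu), 0,
			       sizeof(struct demo_stats));
			per_cpu(demo_stats, cpu).keep_me = keep;
			preempt_enable();
		}
	}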
*/ + vn_active = per_cpu(xfsstats, c).vn_active; + memset(&per_cpu(xfsstats, c), 0, + sizeof(struct xfsstats)); + per_cpu(xfsstats, c).vn_active = vn_active; + preempt_enable(); + } + xfs_stats_clear = 0; + } + + return ret; +} + +STATIC int +xfs_panic_mask_proc_handler( + ctl_table *ctl, + int write, + void __user *buffer, + size_t *lenp, + loff_t *ppos) +{ + int ret, *valp = ctl->data; + + ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); + if (!ret && write) { + xfs_panic_mask = *valp; +#ifdef DEBUG + xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES); +#endif + } + return ret; +} +#endif /* CONFIG_PROC_FS */ + +static ctl_table xfs_table[] = { + { + .procname = "irix_sgid_inherit", + .data = &xfs_params.sgid_inherit.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.sgid_inherit.min, + .extra2 = &xfs_params.sgid_inherit.max + }, + { + .procname = "irix_symlink_mode", + .data = &xfs_params.symlink_mode.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.symlink_mode.min, + .extra2 = &xfs_params.symlink_mode.max + }, + { + .procname = "panic_mask", + .data = &xfs_params.panic_mask.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = xfs_panic_mask_proc_handler, + .extra1 = &xfs_params.panic_mask.min, + .extra2 = &xfs_params.panic_mask.max + }, + + { + .procname = "error_level", + .data = &xfs_params.error_level.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.error_level.min, + .extra2 = &xfs_params.error_level.max + }, + { + .procname = "xfssyncd_centisecs", + .data = &xfs_params.syncd_timer.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.syncd_timer.min, + .extra2 = &xfs_params.syncd_timer.max + }, + { + .procname = "inherit_sync", + .data = &xfs_params.inherit_sync.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.inherit_sync.min, + .extra2 = &xfs_params.inherit_sync.max + }, + { + .procname = "inherit_nodump", + .data = &xfs_params.inherit_nodump.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.inherit_nodump.min, + .extra2 = &xfs_params.inherit_nodump.max + }, + { + .procname = "inherit_noatime", + .data = &xfs_params.inherit_noatim.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.inherit_noatim.min, + .extra2 = &xfs_params.inherit_noatim.max + }, + { + .procname = "xfsbufd_centisecs", + .data = &xfs_params.xfs_buf_timer.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.xfs_buf_timer.min, + .extra2 = &xfs_params.xfs_buf_timer.max + }, + { + .procname = "age_buffer_centisecs", + .data = &xfs_params.xfs_buf_age.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.xfs_buf_age.min, + .extra2 = &xfs_params.xfs_buf_age.max + }, + { + .procname = "inherit_nosymlinks", + .data = &xfs_params.inherit_nosym.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.inherit_nosym.min, + .extra2 = &xfs_params.inherit_nosym.max + }, + { + .procname = "rotorstep", + .data = &xfs_params.rotorstep.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + 
.extra1 = &xfs_params.rotorstep.min, + .extra2 = &xfs_params.rotorstep.max + }, + { + .procname = "inherit_nodefrag", + .data = &xfs_params.inherit_nodfrg.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.inherit_nodfrg.min, + .extra2 = &xfs_params.inherit_nodfrg.max + }, + { + .procname = "filestream_centisecs", + .data = &xfs_params.fstrm_timer.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.fstrm_timer.min, + .extra2 = &xfs_params.fstrm_timer.max, + }, + /* please keep this the last entry */ +#ifdef CONFIG_PROC_FS + { + .procname = "stats_clear", + .data = &xfs_params.stats_clear.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = xfs_stats_clear_proc_handler, + .extra1 = &xfs_params.stats_clear.min, + .extra2 = &xfs_params.stats_clear.max + }, +#endif /* CONFIG_PROC_FS */ + + {} +}; + +static ctl_table xfs_dir_table[] = { + { + .procname = "xfs", + .mode = 0555, + .child = xfs_table + }, + {} +}; + +static ctl_table xfs_root_table[] = { + { + .procname = "fs", + .mode = 0555, + .child = xfs_dir_table + }, + {} +}; + +int +xfs_sysctl_register(void) +{ + xfs_table_header = register_sysctl_table(xfs_root_table); + if (!xfs_table_header) + return -ENOMEM; + return 0; +} + +void +xfs_sysctl_unregister(void) +{ + unregister_sysctl_table(xfs_table_header); +} diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h new file mode 100644 index 0000000..b9937d4 --- /dev/null +++ b/fs/xfs/xfs_sysctl.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2001-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_SYSCTL_H__ +#define __XFS_SYSCTL_H__ + +#include + +/* + * Tunable xfs parameters + */ + +typedef struct xfs_sysctl_val { + int min; + int val; + int max; +} xfs_sysctl_val_t; + +typedef struct xfs_param { + xfs_sysctl_val_t sgid_inherit; /* Inherit S_ISGID if process' GID is + * not a member of parent dir GID. */ + xfs_sysctl_val_t symlink_mode; /* Link creat mode affected by umask */ + xfs_sysctl_val_t panic_mask; /* bitmask to cause panic on errors. */ + xfs_sysctl_val_t error_level; /* Degree of reporting for problems */ + xfs_sysctl_val_t syncd_timer; /* Interval between xfssyncd wakeups */ + xfs_sysctl_val_t stats_clear; /* Reset all XFS statistics to zero. */ + xfs_sysctl_val_t inherit_sync; /* Inherit the "sync" inode flag. */ + xfs_sysctl_val_t inherit_nodump;/* Inherit the "nodump" inode flag. */ + xfs_sysctl_val_t inherit_noatim;/* Inherit the "noatime" inode flag. */ + xfs_sysctl_val_t xfs_buf_timer; /* Interval between xfsbufd wakeups. */ + xfs_sysctl_val_t xfs_buf_age; /* Metadata buffer age before flush. */ + xfs_sysctl_val_t inherit_nosym; /* Inherit the "nosymlinks" flag. 
*/ + xfs_sysctl_val_t rotorstep; /* inode32 AG rotoring control knob */ + xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */ + xfs_sysctl_val_t fstrm_timer; /* Filestream dir-AG assoc'n timeout. */ +} xfs_param_t; + +/* + * xfs_error_level: + * + * How much error reporting will be done when internal problems are + * encountered. These problems normally return an EFSCORRUPTED to their + * caller, with no other information reported. + * + * 0 No error reports + * 1 Report EFSCORRUPTED errors that will cause a filesystem shutdown + * 5 Report all EFSCORRUPTED errors (all of the above errors, plus any + * additional errors that are known to not cause shutdowns) + * + * xfs_panic_mask bit 0x8 turns the error reports into panics + */ + +enum { + /* XFS_REFCACHE_SIZE = 1 */ + /* XFS_REFCACHE_PURGE = 2 */ + /* XFS_RESTRICT_CHOWN = 3 */ + XFS_SGID_INHERIT = 4, + XFS_SYMLINK_MODE = 5, + XFS_PANIC_MASK = 6, + XFS_ERRLEVEL = 7, + XFS_SYNCD_TIMER = 8, + /* XFS_PROBE_DMAPI = 9 */ + /* XFS_PROBE_IOOPS = 10 */ + /* XFS_PROBE_QUOTA = 11 */ + XFS_STATS_CLEAR = 12, + XFS_INHERIT_SYNC = 13, + XFS_INHERIT_NODUMP = 14, + XFS_INHERIT_NOATIME = 15, + XFS_BUF_TIMER = 16, + XFS_BUF_AGE = 17, + /* XFS_IO_BYPASS = 18 */ + XFS_INHERIT_NOSYM = 19, + XFS_ROTORSTEP = 20, + XFS_INHERIT_NODFRG = 21, + XFS_FILESTREAM_TIMER = 22, +}; + +extern xfs_param_t xfs_params; + +#ifdef CONFIG_SYSCTL +extern int xfs_sysctl_register(void); +extern void xfs_sysctl_unregister(void); +#else +# define xfs_sysctl_register() (0) +# define xfs_sysctl_unregister() do { } while (0) +#endif /* CONFIG_SYSCTL */ + +#endif /* __XFS_SYSCTL_H__ */ diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c new file mode 100644 index 0000000..9010ce8 --- /dev/null +++ b/fs/xfs/xfs_trace.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2009, Christoph Hellwig + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_types.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_da_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_dinode.h" +#include "xfs_inode.h" +#include "xfs_btree.h" +#include "xfs_mount.h" +#include "xfs_ialloc.h" +#include "xfs_itable.h" +#include "xfs_alloc.h" +#include "xfs_bmap.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_log_priv.h" +#include "xfs_buf_item.h" +#include "xfs_quota.h" +#include "xfs_iomap.h" +#include "xfs_aops.h" +#include "xfs_dquot_item.h" +#include "xfs_dquot.h" +#include "xfs_log_recover.h" +#include "xfs_inode_item.h" + +/* + * We include this last to have the helpers above available for the trace + * event implementations. 
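The CREATE_TRACE_POINTS sequence just below is the standard kernel tracepoint idiom: exactly one .c file defines the macro before including the trace header, which expands the DECLARE_EVENT_CLASS()/TRACE_EVENT() declarations into event implementations, while every other user includes the header plainly. A minimal sketch for a hypothetical "demo" subsystem (the TRACE_INCLUDE_PATH plumbing a header outside include/trace/events needs is omitted here):

	/* demo_trace.h */
	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM demo

	#if !defined(_DEMO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _DEMO_TRACE_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(demo_event,
		TP_PROTO(int value),
		TP_ARGS(value),
		TP_STRUCT__entry(__field(int, value)),
		TP_fast_assign(__entry->value = value;),
		TP_printk("value %d", __entry->value)
	);

	#endif /* _DEMO_TRACE_H */
	#include <trace/define_trace.h>

	/* demo.c - the single definition site */
	#define CREATE_TRACE_POINTS
	#include "demo_trace.h"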
+ */ +#define CREATE_TRACE_POINTS +#include "xfs_trace.h" diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h new file mode 100644 index 0000000..690fc7a --- /dev/null +++ b/fs/xfs/xfs_trace.h @@ -0,0 +1,1746 @@ +/* + * Copyright (c) 2009, Christoph Hellwig + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM xfs + +#if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_XFS_H + +#include + +struct xfs_agf; +struct xfs_alloc_arg; +struct xfs_attr_list_context; +struct xfs_buf_log_item; +struct xfs_da_args; +struct xfs_da_node_entry; +struct xfs_dquot; +struct xlog_ticket; +struct log; +struct xlog_recover; +struct xlog_recover_item; +struct xfs_buf_log_format; +struct xfs_inode_log_format; + +DECLARE_EVENT_CLASS(xfs_attr_list_class, + TP_PROTO(struct xfs_attr_list_context *ctx), + TP_ARGS(ctx), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(u32, hashval) + __field(u32, blkno) + __field(u32, offset) + __field(void *, alist) + __field(int, bufsize) + __field(int, count) + __field(int, firstu) + __field(int, dupcnt) + __field(int, flags) + ), + TP_fast_assign( + __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; + __entry->ino = ctx->dp->i_ino; + __entry->hashval = ctx->cursor->hashval; + __entry->blkno = ctx->cursor->blkno; + __entry->offset = ctx->cursor->offset; + __entry->alist = ctx->alist; + __entry->bufsize = ctx->bufsize; + __entry->count = ctx->count; + __entry->firstu = ctx->firstu; + __entry->flags = ctx->flags; + ), + TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " + "alist 0x%p size %u count %u firstu %u flags %d %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->hashval, + __entry->blkno, + __entry->offset, + __entry->dupcnt, + __entry->alist, + __entry->bufsize, + __entry->count, + __entry->firstu, + __entry->flags, + __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) + ) +) + +#define DEFINE_ATTR_LIST_EVENT(name) \ +DEFINE_EVENT(xfs_attr_list_class, name, \ + TP_PROTO(struct xfs_attr_list_context *ctx), \ + TP_ARGS(ctx)) +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk); +DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound); + +DECLARE_EVENT_CLASS(xfs_perag_class, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, + unsigned long caller_ip), + TP_ARGS(mp, agno, refcount, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(int, refcount) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->refcount = refcount; + __entry->caller_ip 
= caller_ip; + ), + TP_printk("dev %d:%d agno %u refcount %d caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->refcount, + (char *)__entry->caller_ip) +); + +#define DEFINE_PERAG_REF_EVENT(name) \ +DEFINE_EVENT(xfs_perag_class, name, \ + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \ + unsigned long caller_ip), \ + TP_ARGS(mp, agno, refcount, caller_ip)) +DEFINE_PERAG_REF_EVENT(xfs_perag_get); +DEFINE_PERAG_REF_EVENT(xfs_perag_get_tag); +DEFINE_PERAG_REF_EVENT(xfs_perag_put); +DEFINE_PERAG_REF_EVENT(xfs_perag_set_reclaim); +DEFINE_PERAG_REF_EVENT(xfs_perag_clear_reclaim); + +TRACE_EVENT(xfs_attr_list_node_descend, + TP_PROTO(struct xfs_attr_list_context *ctx, + struct xfs_da_node_entry *btree), + TP_ARGS(ctx, btree), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(u32, hashval) + __field(u32, blkno) + __field(u32, offset) + __field(void *, alist) + __field(int, bufsize) + __field(int, count) + __field(int, firstu) + __field(int, dupcnt) + __field(int, flags) + __field(u32, bt_hashval) + __field(u32, bt_before) + ), + TP_fast_assign( + __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; + __entry->ino = ctx->dp->i_ino; + __entry->hashval = ctx->cursor->hashval; + __entry->blkno = ctx->cursor->blkno; + __entry->offset = ctx->cursor->offset; + __entry->alist = ctx->alist; + __entry->bufsize = ctx->bufsize; + __entry->count = ctx->count; + __entry->firstu = ctx->firstu; + __entry->flags = ctx->flags; + __entry->bt_hashval = be32_to_cpu(btree->hashval); + __entry->bt_before = be32_to_cpu(btree->before); + ), + TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " + "alist 0x%p size %u count %u firstu %u flags %d %s " + "node hashval %u, node before %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->hashval, + __entry->blkno, + __entry->offset, + __entry->dupcnt, + __entry->alist, + __entry->bufsize, + __entry->count, + __entry->firstu, + __entry->flags, + __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS), + __entry->bt_hashval, + __entry->bt_before) +); + +TRACE_EVENT(xfs_iext_insert, + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, + struct xfs_bmbt_irec *r, int state, unsigned long caller_ip), + TP_ARGS(ip, idx, r, state, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_extnum_t, idx) + __field(xfs_fileoff_t, startoff) + __field(xfs_fsblock_t, startblock) + __field(xfs_filblks_t, blockcount) + __field(xfs_exntst_t, state) + __field(int, bmap_state) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->idx = idx; + __entry->startoff = r->br_startoff; + __entry->startblock = r->br_startblock; + __entry->blockcount = r->br_blockcount; + __entry->state = r->br_state; + __entry->bmap_state = state; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " + "offset %lld block %lld count %lld flag %d caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), + (long)__entry->idx, + __entry->startoff, + (__int64_t)__entry->startblock, + __entry->blockcount, + __entry->state, + (char *)__entry->caller_ip) +); + +DECLARE_EVENT_CLASS(xfs_bmap_class, + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, + unsigned long caller_ip), + TP_ARGS(ip, idx, state, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + 
__field(xfs_extnum_t, idx) + __field(xfs_fileoff_t, startoff) + __field(xfs_fsblock_t, startblock) + __field(xfs_filblks_t, blockcount) + __field(xfs_exntst_t, state) + __field(int, bmap_state) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? + ip->i_afp : &ip->i_df; + struct xfs_bmbt_irec r; + + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->idx = idx; + __entry->startoff = r.br_startoff; + __entry->startblock = r.br_startblock; + __entry->blockcount = r.br_blockcount; + __entry->state = r.br_state; + __entry->bmap_state = state; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " + "offset %lld block %lld count %lld flag %d caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), + (long)__entry->idx, + __entry->startoff, + (__int64_t)__entry->startblock, + __entry->blockcount, + __entry->state, + (char *)__entry->caller_ip) +) + +#define DEFINE_BMAP_EVENT(name) \ +DEFINE_EVENT(xfs_bmap_class, name, \ + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \ + unsigned long caller_ip), \ + TP_ARGS(ip, idx, state, caller_ip)) +DEFINE_BMAP_EVENT(xfs_iext_remove); +DEFINE_BMAP_EVENT(xfs_bmap_pre_update); +DEFINE_BMAP_EVENT(xfs_bmap_post_update); +DEFINE_BMAP_EVENT(xfs_extlist); + +DECLARE_EVENT_CLASS(xfs_buf_class, + TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), + TP_ARGS(bp, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_daddr_t, bno) + __field(size_t, buffer_length) + __field(int, hold) + __field(int, pincount) + __field(unsigned, lockval) + __field(unsigned, flags) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = bp->b_target->bt_dev; + __entry->bno = bp->b_bn; + __entry->buffer_length = bp->b_buffer_length; + __entry->hold = atomic_read(&bp->b_hold); + __entry->pincount = atomic_read(&bp->b_pin_count); + __entry->lockval = bp->b_sema.count; + __entry->flags = bp->b_flags; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " + "lock %d flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->bno, + __entry->buffer_length, + __entry->hold, + __entry->pincount, + __entry->lockval, + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), + (void *)__entry->caller_ip) +) + +#define DEFINE_BUF_EVENT(name) \ +DEFINE_EVENT(xfs_buf_class, name, \ + TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ + TP_ARGS(bp, caller_ip)) +DEFINE_BUF_EVENT(xfs_buf_init); +DEFINE_BUF_EVENT(xfs_buf_free); +DEFINE_BUF_EVENT(xfs_buf_hold); +DEFINE_BUF_EVENT(xfs_buf_rele); +DEFINE_BUF_EVENT(xfs_buf_iodone); +DEFINE_BUF_EVENT(xfs_buf_iorequest); +DEFINE_BUF_EVENT(xfs_buf_bawrite); +DEFINE_BUF_EVENT(xfs_buf_bdwrite); +DEFINE_BUF_EVENT(xfs_buf_lock); +DEFINE_BUF_EVENT(xfs_buf_lock_done); +DEFINE_BUF_EVENT(xfs_buf_trylock); +DEFINE_BUF_EVENT(xfs_buf_unlock); +DEFINE_BUF_EVENT(xfs_buf_iowait); +DEFINE_BUF_EVENT(xfs_buf_iowait_done); +DEFINE_BUF_EVENT(xfs_buf_delwri_queue); +DEFINE_BUF_EVENT(xfs_buf_delwri_dequeue); +DEFINE_BUF_EVENT(xfs_buf_delwri_split); +DEFINE_BUF_EVENT(xfs_buf_get_uncached); +DEFINE_BUF_EVENT(xfs_bdstrat_shut); +DEFINE_BUF_EVENT(xfs_buf_item_relse); +DEFINE_BUF_EVENT(xfs_buf_item_iodone); +DEFINE_BUF_EVENT(xfs_buf_item_iodone_async); +DEFINE_BUF_EVENT(xfs_buf_error_relse); 
+DEFINE_BUF_EVENT(xfs_trans_read_buf_io); +DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); + +/* not really buffer traces, but the buf provides useful information */ +DEFINE_BUF_EVENT(xfs_btree_corrupt); +DEFINE_BUF_EVENT(xfs_da_btree_corrupt); +DEFINE_BUF_EVENT(xfs_reset_dqcounts); +DEFINE_BUF_EVENT(xfs_inode_item_push); + +/* pass flags explicitly */ +DECLARE_EVENT_CLASS(xfs_buf_flags_class, + TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), + TP_ARGS(bp, flags, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_daddr_t, bno) + __field(size_t, buffer_length) + __field(int, hold) + __field(int, pincount) + __field(unsigned, lockval) + __field(unsigned, flags) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = bp->b_target->bt_dev; + __entry->bno = bp->b_bn; + __entry->buffer_length = bp->b_buffer_length; + __entry->flags = flags; + __entry->hold = atomic_read(&bp->b_hold); + __entry->pincount = atomic_read(&bp->b_pin_count); + __entry->lockval = bp->b_sema.count; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " + "lock %d flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->bno, + __entry->buffer_length, + __entry->hold, + __entry->pincount, + __entry->lockval, + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), + (void *)__entry->caller_ip) +) + +#define DEFINE_BUF_FLAGS_EVENT(name) \ +DEFINE_EVENT(xfs_buf_flags_class, name, \ + TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ + TP_ARGS(bp, flags, caller_ip)) +DEFINE_BUF_FLAGS_EVENT(xfs_buf_find); +DEFINE_BUF_FLAGS_EVENT(xfs_buf_get); +DEFINE_BUF_FLAGS_EVENT(xfs_buf_read); + +TRACE_EVENT(xfs_buf_ioerror, + TP_PROTO(struct xfs_buf *bp, int error, unsigned long caller_ip), + TP_ARGS(bp, error, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_daddr_t, bno) + __field(size_t, buffer_length) + __field(unsigned, flags) + __field(int, hold) + __field(int, pincount) + __field(unsigned, lockval) + __field(int, error) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = bp->b_target->bt_dev; + __entry->bno = bp->b_bn; + __entry->buffer_length = bp->b_buffer_length; + __entry->hold = atomic_read(&bp->b_hold); + __entry->pincount = atomic_read(&bp->b_pin_count); + __entry->lockval = bp->b_sema.count; + __entry->error = error; + __entry->flags = bp->b_flags; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " + "lock %d error %d flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->bno, + __entry->buffer_length, + __entry->hold, + __entry->pincount, + __entry->lockval, + __entry->error, + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), + (void *)__entry->caller_ip) +); + +DECLARE_EVENT_CLASS(xfs_buf_item_class, + TP_PROTO(struct xfs_buf_log_item *bip), + TP_ARGS(bip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_daddr_t, buf_bno) + __field(size_t, buf_len) + __field(int, buf_hold) + __field(int, buf_pincount) + __field(int, buf_lockval) + __field(unsigned, buf_flags) + __field(unsigned, bli_recur) + __field(int, bli_refcount) + __field(unsigned, bli_flags) + __field(void *, li_desc) + __field(unsigned, li_flags) + ), + TP_fast_assign( + __entry->dev = bip->bli_buf->b_target->bt_dev; + __entry->bli_flags = bip->bli_flags; + __entry->bli_recur = bip->bli_recur; + __entry->bli_refcount = atomic_read(&bip->bli_refcount); + 
__entry->buf_bno = bip->bli_buf->b_bn; + __entry->buf_len = bip->bli_buf->b_buffer_length; + __entry->buf_flags = bip->bli_buf->b_flags; + __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); + __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count); + __entry->buf_lockval = bip->bli_buf->b_sema.count; + __entry->li_desc = bip->bli_item.li_desc; + __entry->li_flags = bip->bli_item.li_flags; + ), + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " + "lock %d flags %s recur %d refcount %d bliflags %s " + "lidesc 0x%p liflags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->buf_bno, + __entry->buf_len, + __entry->buf_hold, + __entry->buf_pincount, + __entry->buf_lockval, + __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), + __entry->bli_recur, + __entry->bli_refcount, + __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), + __entry->li_desc, + __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) +) + +#define DEFINE_BUF_ITEM_EVENT(name) \ +DEFINE_EVENT(xfs_buf_item_class, name, \ + TP_PROTO(struct xfs_buf_log_item *bip), \ + TP_ARGS(bip)) +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_trylock); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); +DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur); +DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb); +DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur); +DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur); +DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf); +DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse); +DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin); +DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold); +DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); +DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); + +DECLARE_EVENT_CLASS(xfs_lock_class, + TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, + unsigned long caller_ip), + TP_ARGS(ip, lock_flags, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(int, lock_flags) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->lock_flags = lock_flags; + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), + (void *)__entry->caller_ip) +) + +#define DEFINE_LOCK_EVENT(name) \ +DEFINE_EVENT(xfs_lock_class, name, \ + TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \ + unsigned long caller_ip), \ + TP_ARGS(ip, lock_flags, caller_ip)) +DEFINE_LOCK_EVENT(xfs_ilock); +DEFINE_LOCK_EVENT(xfs_ilock_nowait); +DEFINE_LOCK_EVENT(xfs_ilock_demote); +DEFINE_LOCK_EVENT(xfs_iunlock); + +DECLARE_EVENT_CLASS(xfs_inode_class, + TP_PROTO(struct xfs_inode *ip), + TP_ARGS(ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + ), + TP_printk("dev %d:%d ino 0x%llx", + 
MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino) +) + +#define DEFINE_INODE_EVENT(name) \ +DEFINE_EVENT(xfs_inode_class, name, \ + TP_PROTO(struct xfs_inode *ip), \ + TP_ARGS(ip)) +DEFINE_INODE_EVENT(xfs_iget_skip); +DEFINE_INODE_EVENT(xfs_iget_reclaim); +DEFINE_INODE_EVENT(xfs_iget_reclaim_fail); +DEFINE_INODE_EVENT(xfs_iget_hit); +DEFINE_INODE_EVENT(xfs_iget_miss); + +DEFINE_INODE_EVENT(xfs_getattr); +DEFINE_INODE_EVENT(xfs_setattr); +DEFINE_INODE_EVENT(xfs_readlink); +DEFINE_INODE_EVENT(xfs_alloc_file_space); +DEFINE_INODE_EVENT(xfs_free_file_space); +DEFINE_INODE_EVENT(xfs_readdir); +#ifdef CONFIG_XFS_POSIX_ACL +DEFINE_INODE_EVENT(xfs_get_acl); +#endif +DEFINE_INODE_EVENT(xfs_vm_bmap); +DEFINE_INODE_EVENT(xfs_file_ioctl); +DEFINE_INODE_EVENT(xfs_file_compat_ioctl); +DEFINE_INODE_EVENT(xfs_ioctl_setattr); +DEFINE_INODE_EVENT(xfs_file_fsync); +DEFINE_INODE_EVENT(xfs_destroy_inode); +DEFINE_INODE_EVENT(xfs_write_inode); +DEFINE_INODE_EVENT(xfs_evict_inode); + +DEFINE_INODE_EVENT(xfs_dquot_dqalloc); +DEFINE_INODE_EVENT(xfs_dquot_dqdetach); + +DECLARE_EVENT_CLASS(xfs_iref_class, + TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), + TP_ARGS(ip, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(int, count) + __field(int, pincount) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->count = atomic_read(&VFS_I(ip)->i_count); + __entry->pincount = atomic_read(&ip->i_pincount); + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->count, + __entry->pincount, + (char *)__entry->caller_ip) +) + +#define DEFINE_IREF_EVENT(name) \ +DEFINE_EVENT(xfs_iref_class, name, \ + TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ + TP_ARGS(ip, caller_ip)) +DEFINE_IREF_EVENT(xfs_ihold); +DEFINE_IREF_EVENT(xfs_irele); +DEFINE_IREF_EVENT(xfs_inode_pin); +DEFINE_IREF_EVENT(xfs_inode_unpin); +DEFINE_IREF_EVENT(xfs_inode_unpin_nowait); + +DECLARE_EVENT_CLASS(xfs_namespace_class, + TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), + TP_ARGS(dp, name), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, dp_ino) + __dynamic_array(char, name, name->len) + ), + TP_fast_assign( + __entry->dev = VFS_I(dp)->i_sb->s_dev; + __entry->dp_ino = dp->i_ino; + memcpy(__get_str(name), name->name, name->len); + ), + TP_printk("dev %d:%d dp ino 0x%llx name %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->dp_ino, + __get_str(name)) +) + +#define DEFINE_NAMESPACE_EVENT(name) \ +DEFINE_EVENT(xfs_namespace_class, name, \ + TP_PROTO(struct xfs_inode *dp, struct xfs_name *name), \ + TP_ARGS(dp, name)) +DEFINE_NAMESPACE_EVENT(xfs_remove); +DEFINE_NAMESPACE_EVENT(xfs_link); +DEFINE_NAMESPACE_EVENT(xfs_lookup); +DEFINE_NAMESPACE_EVENT(xfs_create); +DEFINE_NAMESPACE_EVENT(xfs_symlink); + +TRACE_EVENT(xfs_rename, + TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp, + struct xfs_name *src_name, struct xfs_name *target_name), + TP_ARGS(src_dp, target_dp, src_name, target_name), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, src_dp_ino) + __field(xfs_ino_t, target_dp_ino) + __dynamic_array(char, src_name, src_name->len) + __dynamic_array(char, target_name, target_name->len) + ), + TP_fast_assign( + __entry->dev = VFS_I(src_dp)->i_sb->s_dev; + __entry->src_dp_ino = src_dp->i_ino; + __entry->target_dp_ino = target_dp->i_ino; 
+ memcpy(__get_str(src_name), src_name->name, src_name->len); + memcpy(__get_str(target_name), target_name->name, target_name->len); + ), + TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx" + " src name %s target name %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->src_dp_ino, + __entry->target_dp_ino, + __get_str(src_name), + __get_str(target_name)) +) + +DECLARE_EVENT_CLASS(xfs_dquot_class, + TP_PROTO(struct xfs_dquot *dqp), + TP_ARGS(dqp), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(u32, id) + __field(unsigned, flags) + __field(unsigned, nrefs) + __field(unsigned long long, res_bcount) + __field(unsigned long long, bcount) + __field(unsigned long long, icount) + __field(unsigned long long, blk_hardlimit) + __field(unsigned long long, blk_softlimit) + __field(unsigned long long, ino_hardlimit) + __field(unsigned long long, ino_softlimit) + ), \ + TP_fast_assign( + __entry->dev = dqp->q_mount->m_super->s_dev; + __entry->id = be32_to_cpu(dqp->q_core.d_id); + __entry->flags = dqp->dq_flags; + __entry->nrefs = dqp->q_nrefs; + __entry->res_bcount = dqp->q_res_bcount; + __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); + __entry->icount = be64_to_cpu(dqp->q_core.d_icount); + __entry->blk_hardlimit = + be64_to_cpu(dqp->q_core.d_blk_hardlimit); + __entry->blk_softlimit = + be64_to_cpu(dqp->q_core.d_blk_softlimit); + __entry->ino_hardlimit = + be64_to_cpu(dqp->q_core.d_ino_hardlimit); + __entry->ino_softlimit = + be64_to_cpu(dqp->q_core.d_ino_softlimit); + ), + TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " + "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx " + "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->id, + __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), + __entry->nrefs, + __entry->res_bcount, + __entry->bcount, + __entry->blk_hardlimit, + __entry->blk_softlimit, + __entry->icount, + __entry->ino_hardlimit, + __entry->ino_softlimit) +) + +#define DEFINE_DQUOT_EVENT(name) \ +DEFINE_EVENT(xfs_dquot_class, name, \ + TP_PROTO(struct xfs_dquot *dqp), \ + TP_ARGS(dqp)) +DEFINE_DQUOT_EVENT(xfs_dqadjust); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); +DEFINE_DQUOT_EVENT(xfs_dqreclaim_unlink); +DEFINE_DQUOT_EVENT(xfs_dqattach_found); +DEFINE_DQUOT_EVENT(xfs_dqattach_get); +DEFINE_DQUOT_EVENT(xfs_dqinit); +DEFINE_DQUOT_EVENT(xfs_dqreuse); +DEFINE_DQUOT_EVENT(xfs_dqalloc); +DEFINE_DQUOT_EVENT(xfs_dqtobp_read); +DEFINE_DQUOT_EVENT(xfs_dqread); +DEFINE_DQUOT_EVENT(xfs_dqread_fail); +DEFINE_DQUOT_EVENT(xfs_dqlookup_found); +DEFINE_DQUOT_EVENT(xfs_dqlookup_want); +DEFINE_DQUOT_EVENT(xfs_dqlookup_freelist); +DEFINE_DQUOT_EVENT(xfs_dqlookup_done); +DEFINE_DQUOT_EVENT(xfs_dqget_hit); +DEFINE_DQUOT_EVENT(xfs_dqget_miss); +DEFINE_DQUOT_EVENT(xfs_dqput); +DEFINE_DQUOT_EVENT(xfs_dqput_wait); +DEFINE_DQUOT_EVENT(xfs_dqput_free); +DEFINE_DQUOT_EVENT(xfs_dqrele); +DEFINE_DQUOT_EVENT(xfs_dqflush); +DEFINE_DQUOT_EVENT(xfs_dqflush_force); +DEFINE_DQUOT_EVENT(xfs_dqflush_done); + +DECLARE_EVENT_CLASS(xfs_loggrant_class, + TP_PROTO(struct log *log, struct xlog_ticket *tic), + TP_ARGS(log, tic), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned, trans_type) + __field(char, ocnt) + __field(char, cnt) + __field(int, curr_res) + __field(int, unit_res) + __field(unsigned int, flags) + __field(int, reserveq) + __field(int, writeq) + __field(int, grant_reserve_cycle) + __field(int, grant_reserve_bytes) + __field(int, grant_write_cycle) + __field(int, grant_write_bytes) 
+ __field(int, curr_cycle) + __field(int, curr_block) + __field(xfs_lsn_t, tail_lsn) + ), + TP_fast_assign( + __entry->dev = log->l_mp->m_super->s_dev; + __entry->trans_type = tic->t_trans_type; + __entry->ocnt = tic->t_ocnt; + __entry->cnt = tic->t_cnt; + __entry->curr_res = tic->t_curr_res; + __entry->unit_res = tic->t_unit_res; + __entry->flags = tic->t_flags; + __entry->reserveq = list_empty(&log->l_reserveq); + __entry->writeq = list_empty(&log->l_writeq); + xlog_crack_grant_head(&log->l_grant_reserve_head, + &__entry->grant_reserve_cycle, + &__entry->grant_reserve_bytes); + xlog_crack_grant_head(&log->l_grant_write_head, + &__entry->grant_write_cycle, + &__entry->grant_write_bytes); + __entry->curr_cycle = log->l_curr_cycle; + __entry->curr_block = log->l_curr_block; + __entry->tail_lsn = atomic64_read(&log->l_tail_lsn); + ), + TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " + "t_unit_res %u t_flags %s reserveq %s " + "writeq %s grant_reserve_cycle %d " + "grant_reserve_bytes %d grant_write_cycle %d " + "grant_write_bytes %d curr_cycle %d curr_block %d " + "tail_cycle %d tail_block %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), + __entry->ocnt, + __entry->cnt, + __entry->curr_res, + __entry->unit_res, + __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), + __entry->reserveq ? "empty" : "active", + __entry->writeq ? "empty" : "active", + __entry->grant_reserve_cycle, + __entry->grant_reserve_bytes, + __entry->grant_write_cycle, + __entry->grant_write_bytes, + __entry->curr_cycle, + __entry->curr_block, + CYCLE_LSN(__entry->tail_lsn), + BLOCK_LSN(__entry->tail_lsn) + ) +) + +#define DEFINE_LOGGRANT_EVENT(name) \ +DEFINE_EVENT(xfs_loggrant_class, name, \ + TP_PROTO(struct log *log, struct xlog_ticket *tic), \ + TP_ARGS(log, tic)) +DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); +DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); +DEFINE_LOGGRANT_EVENT(xfs_log_reserve); +DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_error); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); +DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_enter); +DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit); +DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub); + +DECLARE_EVENT_CLASS(xfs_file_class, + TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), + TP_ARGS(ip, count, offset, flags), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_fsize_t, new_size) + __field(loff_t, offset) + __field(size_t, count) + __field(int, flags) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + 
__entry->size = ip->i_d.di_size; + __entry->new_size = ip->i_new_size; + __entry->offset = offset; + __entry->count = count; + __entry->flags = flags; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " + "offset 0x%llx count 0x%zx ioflags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->new_size, + __entry->offset, + __entry->count, + __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) +) + +#define DEFINE_RW_EVENT(name) \ +DEFINE_EVENT(xfs_file_class, name, \ + TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \ + TP_ARGS(ip, count, offset, flags)) +DEFINE_RW_EVENT(xfs_file_read); +DEFINE_RW_EVENT(xfs_file_buffered_write); +DEFINE_RW_EVENT(xfs_file_direct_write); +DEFINE_RW_EVENT(xfs_file_splice_read); +DEFINE_RW_EVENT(xfs_file_splice_write); + +DECLARE_EVENT_CLASS(xfs_page_class, + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), + TP_ARGS(inode, page, off), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(pgoff_t, pgoff) + __field(loff_t, size) + __field(unsigned long, offset) + __field(int, delalloc) + __field(int, unwritten) + ), + TP_fast_assign( + int delalloc = -1, unwritten = -1; + + if (page_has_buffers(page)) + xfs_count_page_state(page, &delalloc, &unwritten); + __entry->dev = inode->i_sb->s_dev; + __entry->ino = XFS_I(inode)->i_ino; + __entry->pgoff = page_offset(page); + __entry->size = i_size_read(inode); + __entry->offset = off; + __entry->delalloc = delalloc; + __entry->unwritten = unwritten; + ), + TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " + "delalloc %d unwritten %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->pgoff, + __entry->size, + __entry->offset, + __entry->delalloc, + __entry->unwritten) +) + +#define DEFINE_PAGE_EVENT(name) \ +DEFINE_EVENT(xfs_page_class, name, \ + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ + TP_ARGS(inode, page, off)) +DEFINE_PAGE_EVENT(xfs_writepage); +DEFINE_PAGE_EVENT(xfs_releasepage); +DEFINE_PAGE_EVENT(xfs_invalidatepage); + +DECLARE_EVENT_CLASS(xfs_imap_class, + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, + int type, struct xfs_bmbt_irec *irec), + TP_ARGS(ip, offset, count, type, irec), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(loff_t, size) + __field(loff_t, new_size) + __field(loff_t, offset) + __field(size_t, count) + __field(int, type) + __field(xfs_fileoff_t, startoff) + __field(xfs_fsblock_t, startblock) + __field(xfs_filblks_t, blockcount) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->new_size = ip->i_new_size; + __entry->offset = offset; + __entry->count = count; + __entry->type = type; + __entry->startoff = irec ? irec->br_startoff : 0; + __entry->startblock = irec ? irec->br_startblock : 0; + __entry->blockcount = irec ? 
irec->br_blockcount : 0; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " + "offset 0x%llx count %zd type %s " + "startoff 0x%llx startblock %lld blockcount 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->new_size, + __entry->offset, + __entry->count, + __print_symbolic(__entry->type, XFS_IO_TYPES), + __entry->startoff, + (__int64_t)__entry->startblock, + __entry->blockcount) +) + +#define DEFINE_IOMAP_EVENT(name) \ +DEFINE_EVENT(xfs_imap_class, name, \ + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ + int type, struct xfs_bmbt_irec *irec), \ + TP_ARGS(ip, offset, count, type, irec)) +DEFINE_IOMAP_EVENT(xfs_map_blocks_found); +DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc); +DEFINE_IOMAP_EVENT(xfs_get_blocks_found); +DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc); + +DECLARE_EVENT_CLASS(xfs_simple_io_class, + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), + TP_ARGS(ip, offset, count), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(loff_t, isize) + __field(loff_t, disize) + __field(loff_t, new_size) + __field(loff_t, offset) + __field(size_t, count) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->isize = ip->i_size; + __entry->disize = ip->i_d.di_size; + __entry->new_size = ip->i_new_size; + __entry->offset = offset; + __entry->count = count; + ), + TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx " + "offset 0x%llx count %zd", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->isize, + __entry->disize, + __entry->new_size, + __entry->offset, + __entry->count) +); + +#define DEFINE_SIMPLE_IO_EVENT(name) \ +DEFINE_EVENT(xfs_simple_io_class, name, \ + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \ + TP_ARGS(ip, offset, count)) +DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); +DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); +DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound); +DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize); + +DECLARE_EVENT_CLASS(xfs_itrunc_class, + TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), + TP_ARGS(ip, new_size), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_fsize_t, new_size) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->new_size = new_size; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->new_size) +) + +#define DEFINE_ITRUNC_EVENT(name) \ +DEFINE_EVENT(xfs_itrunc_class, name, \ + TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ + TP_ARGS(ip, new_size)) +DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start); +DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end); + +TRACE_EVENT(xfs_pagecache_inval, + TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), + TP_ARGS(ip, start, finish), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_off_t, start) + __field(xfs_off_t, finish) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->start = start; + __entry->finish = finish; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx start 0x%llx finish 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + 
__entry->start, + __entry->finish) +); + +TRACE_EVENT(xfs_bunmap, + TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t bno, xfs_filblks_t len, + int flags, unsigned long caller_ip), + TP_ARGS(ip, bno, len, flags, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(xfs_fsize_t, size) + __field(xfs_fileoff_t, bno) + __field(xfs_filblks_t, len) + __field(unsigned long, caller_ip) + __field(int, flags) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->ino = ip->i_ino; + __entry->size = ip->i_d.di_size; + __entry->bno = bno; + __entry->len = len; + __entry->caller_ip = caller_ip; + __entry->flags = flags; + ), + TP_printk("dev %d:%d ino 0x%llx size 0x%llx bno 0x%llx len 0x%llx" + "flags %s caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->bno, + __entry->len, + __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS), + (void *)__entry->caller_ip) + +); + +DECLARE_EVENT_CLASS(xfs_busy_class, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(mp, agno, agbno, len), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len) +); +#define DEFINE_BUSY_EVENT(name) \ +DEFINE_EVENT(xfs_busy_class, name, \ + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ + xfs_agblock_t agbno, xfs_extlen_t len), \ + TP_ARGS(mp, agno, agbno, len)) +DEFINE_BUSY_EVENT(xfs_alloc_busy); +DEFINE_BUSY_EVENT(xfs_alloc_busy_enomem); +DEFINE_BUSY_EVENT(xfs_alloc_busy_force); +DEFINE_BUSY_EVENT(xfs_alloc_busy_reuse); +DEFINE_BUSY_EVENT(xfs_alloc_busy_clear); + +TRACE_EVENT(xfs_alloc_busy_trim, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno, xfs_extlen_t len, + xfs_agblock_t tbno, xfs_extlen_t tlen), + TP_ARGS(mp, agno, agbno, len, tbno, tlen), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(xfs_agblock_t, tbno) + __field(xfs_extlen_t, tlen) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->tbno = tbno; + __entry->tlen = tlen; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u tbno %u tlen %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __entry->tbno, + __entry->tlen) +); + +TRACE_EVENT(xfs_trans_commit_lsn, + TP_PROTO(struct xfs_trans *trans), + TP_ARGS(trans), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(struct xfs_trans *, tp) + __field(xfs_lsn_t, lsn) + ), + TP_fast_assign( + __entry->dev = trans->t_mountp->m_super->s_dev; + __entry->tp = trans; + __entry->lsn = trans->t_commit_lsn; + ), + TP_printk("dev %d:%d trans 0x%p commit_lsn 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tp, + __entry->lsn) +); + +TRACE_EVENT(xfs_agf, + TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, + unsigned long caller_ip), + TP_ARGS(mp, agf, flags, caller_ip), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(int, flags) + __field(__u32, length) + __field(__u32, bno_root) + __field(__u32, 
cnt_root) + __field(__u32, bno_level) + __field(__u32, cnt_level) + __field(__u32, flfirst) + __field(__u32, fllast) + __field(__u32, flcount) + __field(__u32, freeblks) + __field(__u32, longest) + __field(unsigned long, caller_ip) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = be32_to_cpu(agf->agf_seqno), + __entry->flags = flags; + __entry->length = be32_to_cpu(agf->agf_length), + __entry->bno_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]), + __entry->cnt_root = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]), + __entry->bno_level = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]), + __entry->cnt_level = + be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]), + __entry->flfirst = be32_to_cpu(agf->agf_flfirst), + __entry->fllast = be32_to_cpu(agf->agf_fllast), + __entry->flcount = be32_to_cpu(agf->agf_flcount), + __entry->freeblks = be32_to_cpu(agf->agf_freeblks), + __entry->longest = be32_to_cpu(agf->agf_longest); + __entry->caller_ip = caller_ip; + ), + TP_printk("dev %d:%d agno %u flags %s length %u roots b %u c %u " + "levels b %u c %u flfirst %u fllast %u flcount %u " + "freeblks %u longest %u caller %pf", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __print_flags(__entry->flags, "|", XFS_AGF_FLAGS), + __entry->length, + __entry->bno_root, + __entry->cnt_root, + __entry->bno_level, + __entry->cnt_level, + __entry->flfirst, + __entry->fllast, + __entry->flcount, + __entry->freeblks, + __entry->longest, + (void *)__entry->caller_ip) +); + +TRACE_EVENT(xfs_free_extent, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, + xfs_extlen_t len, bool isfl, int haveleft, int haveright), + TP_ARGS(mp, agno, agbno, len, isfl, haveleft, haveright), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + __field(int, isfl) + __field(int, haveleft) + __field(int, haveright) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + __entry->isfl = isfl; + __entry->haveleft = haveleft; + __entry->haveright = haveright; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u isfl %d %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len, + __entry->isfl, + __entry->haveleft ? + (__entry->haveright ? "both" : "left") : + (__entry->haveright ? 
"right" : "none")) + +); + +DECLARE_EVENT_CLASS(xfs_alloc_class, + TP_PROTO(struct xfs_alloc_arg *args), + TP_ARGS(args), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, minlen) + __field(xfs_extlen_t, maxlen) + __field(xfs_extlen_t, mod) + __field(xfs_extlen_t, prod) + __field(xfs_extlen_t, minleft) + __field(xfs_extlen_t, total) + __field(xfs_extlen_t, alignment) + __field(xfs_extlen_t, minalignslop) + __field(xfs_extlen_t, len) + __field(short, type) + __field(short, otype) + __field(char, wasdel) + __field(char, wasfromfl) + __field(char, isfl) + __field(char, userdata) + __field(xfs_fsblock_t, firstblock) + ), + TP_fast_assign( + __entry->dev = args->mp->m_super->s_dev; + __entry->agno = args->agno; + __entry->agbno = args->agbno; + __entry->minlen = args->minlen; + __entry->maxlen = args->maxlen; + __entry->mod = args->mod; + __entry->prod = args->prod; + __entry->minleft = args->minleft; + __entry->total = args->total; + __entry->alignment = args->alignment; + __entry->minalignslop = args->minalignslop; + __entry->len = args->len; + __entry->type = args->type; + __entry->otype = args->otype; + __entry->wasdel = args->wasdel; + __entry->wasfromfl = args->wasfromfl; + __entry->isfl = args->isfl; + __entry->userdata = args->userdata; + __entry->firstblock = args->firstblock; + ), + TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " + "prod %u minleft %u total %u alignment %u minalignslop %u " + "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " + "userdata %d firstblock 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->minlen, + __entry->maxlen, + __entry->mod, + __entry->prod, + __entry->minleft, + __entry->total, + __entry->alignment, + __entry->minalignslop, + __entry->len, + __print_symbolic(__entry->type, XFS_ALLOC_TYPES), + __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), + __entry->wasdel, + __entry->wasfromfl, + __entry->isfl, + __entry->userdata, + (unsigned long long)__entry->firstblock) +) + +#define DEFINE_ALLOC_EVENT(name) \ +DEFINE_EVENT(xfs_alloc_class, name, \ + TP_PROTO(struct xfs_alloc_arg *args), \ + TP_ARGS(args)) +DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); +DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound); +DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); +DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); +DEFINE_ALLOC_EVENT(xfs_alloc_near_first); +DEFINE_ALLOC_EVENT(xfs_alloc_near_greater); +DEFINE_ALLOC_EVENT(xfs_alloc_near_lesser); +DEFINE_ALLOC_EVENT(xfs_alloc_near_error); +DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry); +DEFINE_ALLOC_EVENT(xfs_alloc_near_busy); +DEFINE_ALLOC_EVENT(xfs_alloc_size_neither); +DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry); +DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft); +DEFINE_ALLOC_EVENT(xfs_alloc_size_done); +DEFINE_ALLOC_EVENT(xfs_alloc_size_error); +DEFINE_ALLOC_EVENT(xfs_alloc_size_busy); +DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist); +DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough); +DEFINE_ALLOC_EVENT(xfs_alloc_small_done); +DEFINE_ALLOC_EVENT(xfs_alloc_small_error); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed); +DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed); + +DECLARE_EVENT_CLASS(xfs_dir2_class, + TP_PROTO(struct xfs_da_args *args), + TP_ARGS(args), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __dynamic_array(char, name, 
args->namelen) + __field(int, namelen) + __field(xfs_dahash_t, hashval) + __field(xfs_ino_t, inumber) + __field(int, op_flags) + ), + TP_fast_assign( + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; + __entry->ino = args->dp->i_ino; + if (args->namelen) + memcpy(__get_str(name), args->name, args->namelen); + __entry->namelen = args->namelen; + __entry->hashval = args->hashval; + __entry->inumber = args->inumber; + __entry->op_flags = args->op_flags; + ), + TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " + "inumber 0x%llx op_flags %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->namelen, + __entry->namelen ? __get_str(name) : NULL, + __entry->namelen, + __entry->hashval, + __entry->inumber, + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) +) + +#define DEFINE_DIR2_EVENT(name) \ +DEFINE_EVENT(xfs_dir2_class, name, \ + TP_PROTO(struct xfs_da_args *args), \ + TP_ARGS(args)) +DEFINE_DIR2_EVENT(xfs_dir2_sf_addname); +DEFINE_DIR2_EVENT(xfs_dir2_sf_create); +DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup); +DEFINE_DIR2_EVENT(xfs_dir2_sf_replace); +DEFINE_DIR2_EVENT(xfs_dir2_sf_removename); +DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4); +DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8); +DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block); +DEFINE_DIR2_EVENT(xfs_dir2_block_addname); +DEFINE_DIR2_EVENT(xfs_dir2_block_lookup); +DEFINE_DIR2_EVENT(xfs_dir2_block_replace); +DEFINE_DIR2_EVENT(xfs_dir2_block_removename); +DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf); +DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf); +DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname); +DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup); +DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace); +DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename); +DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block); +DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node); +DEFINE_DIR2_EVENT(xfs_dir2_node_addname); +DEFINE_DIR2_EVENT(xfs_dir2_node_lookup); +DEFINE_DIR2_EVENT(xfs_dir2_node_replace); +DEFINE_DIR2_EVENT(xfs_dir2_node_removename); +DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf); + +DECLARE_EVENT_CLASS(xfs_dir2_space_class, + TP_PROTO(struct xfs_da_args *args, int idx), + TP_ARGS(args, idx), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(int, op_flags) + __field(int, idx) + ), + TP_fast_assign( + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; + __entry->ino = args->dp->i_ino; + __entry->op_flags = args->op_flags; + __entry->idx = idx; + ), + TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), + __entry->idx) +) + +#define DEFINE_DIR2_SPACE_EVENT(name) \ +DEFINE_EVENT(xfs_dir2_space_class, name, \ + TP_PROTO(struct xfs_da_args *args, int idx), \ + TP_ARGS(args, idx)) +DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add); +DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove); +DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode); +DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode); + +TRACE_EVENT(xfs_dir2_leafn_moveents, + TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count), + TP_ARGS(args, src_idx, dst_idx, count), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(int, op_flags) + __field(int, src_idx) + __field(int, dst_idx) + __field(int, count) + ), + TP_fast_assign( + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; + __entry->ino = args->dp->i_ino; + __entry->op_flags = args->op_flags; + __entry->src_idx = src_idx; + __entry->dst_idx = dst_idx; + __entry->count = count; + ), + TP_printk("dev %d:%d ino 0x%llx op_flags %s " + 
"src_idx %d dst_idx %d count %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), + __entry->src_idx, + __entry->dst_idx, + __entry->count) +); + +#define XFS_SWAPEXT_INODES \ + { 0, "target" }, \ + { 1, "temp" } + +#define XFS_INODE_FORMAT_STR \ + { 0, "invalid" }, \ + { 1, "local" }, \ + { 2, "extent" }, \ + { 3, "btree" } + +DECLARE_EVENT_CLASS(xfs_swap_extent_class, + TP_PROTO(struct xfs_inode *ip, int which), + TP_ARGS(ip, which), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, which) + __field(xfs_ino_t, ino) + __field(int, format) + __field(int, nex) + __field(int, max_nex) + __field(int, broot_size) + __field(int, fork_off) + ), + TP_fast_assign( + __entry->dev = VFS_I(ip)->i_sb->s_dev; + __entry->which = which; + __entry->ino = ip->i_ino; + __entry->format = ip->i_d.di_format; + __entry->nex = ip->i_d.di_nextents; + __entry->max_nex = ip->i_df.if_ext_max; + __entry->broot_size = ip->i_df.if_broot_bytes; + __entry->fork_off = XFS_IFORK_BOFF(ip); + ), + TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, " + "Max in-fork extents %d, broot size %d, fork offset %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __print_symbolic(__entry->which, XFS_SWAPEXT_INODES), + __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR), + __entry->nex, + __entry->max_nex, + __entry->broot_size, + __entry->fork_off) +) + +#define DEFINE_SWAPEXT_EVENT(name) \ +DEFINE_EVENT(xfs_swap_extent_class, name, \ + TP_PROTO(struct xfs_inode *ip, int which), \ + TP_ARGS(ip, which)) + +DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before); +DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); + +DECLARE_EVENT_CLASS(xfs_log_recover_item_class, + TP_PROTO(struct log *log, struct xlog_recover *trans, + struct xlog_recover_item *item, int pass), + TP_ARGS(log, trans, item, pass), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, item) + __field(xlog_tid_t, tid) + __field(int, type) + __field(int, pass) + __field(int, count) + __field(int, total) + ), + TP_fast_assign( + __entry->dev = log->l_mp->m_super->s_dev; + __entry->item = (unsigned long)item; + __entry->tid = trans->r_log_tid; + __entry->type = ITEM_TYPE(item); + __entry->pass = pass; + __entry->count = item->ri_cnt; + __entry->total = item->ri_total; + ), + TP_printk("dev %d:%d trans 0x%x, pass %d, item 0x%p, item type %s " + "item region count/total %d/%d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tid, + __entry->pass, + (void *)__entry->item, + __print_symbolic(__entry->type, XFS_LI_TYPE_DESC), + __entry->count, + __entry->total) +) + +#define DEFINE_LOG_RECOVER_ITEM(name) \ +DEFINE_EVENT(xfs_log_recover_item_class, name, \ + TP_PROTO(struct log *log, struct xlog_recover *trans, \ + struct xlog_recover_item *item, int pass), \ + TP_ARGS(log, trans, item, pass)) + +DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add); +DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont); +DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head); +DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail); +DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); + +DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, + TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), + TP_ARGS(log, buf_f), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(__int64_t, blkno) + __field(unsigned short, len) + __field(unsigned short, flags) + __field(unsigned short, size) + __field(unsigned int, map_size) + ), + TP_fast_assign( + __entry->dev = 
log->l_mp->m_super->s_dev; + __entry->blkno = buf_f->blf_blkno; + __entry->len = buf_f->blf_len; + __entry->flags = buf_f->blf_flags; + __entry->size = buf_f->blf_size; + __entry->map_size = buf_f->blf_map_size; + ), + TP_printk("dev %d:%d blkno 0x%llx, len %u, flags 0x%x, size %d, " + "map_size %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->blkno, + __entry->len, + __entry->flags, + __entry->size, + __entry->map_size) +) + +#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ +DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ + TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \ + TP_ARGS(log, buf_f)) + +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf); +DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); + +DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, + TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), + TP_ARGS(log, in_f), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(unsigned short, size) + __field(int, fields) + __field(unsigned short, asize) + __field(unsigned short, dsize) + __field(__int64_t, blkno) + __field(int, len) + __field(int, boffset) + ), + TP_fast_assign( + __entry->dev = log->l_mp->m_super->s_dev; + __entry->ino = in_f->ilf_ino; + __entry->size = in_f->ilf_size; + __entry->fields = in_f->ilf_fields; + __entry->asize = in_f->ilf_asize; + __entry->dsize = in_f->ilf_dsize; + __entry->blkno = in_f->ilf_blkno; + __entry->len = in_f->ilf_len; + __entry->boffset = in_f->ilf_boffset; + ), + TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, " + "dsize %d, blkno 0x%llx, len %d, boffset %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->size, + __entry->fields, + __entry->asize, + __entry->dsize, + __entry->blkno, + __entry->len, + __entry->boffset) +) +#define DEFINE_LOG_RECOVER_INO_ITEM(name) \ +DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ + TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \ + TP_ARGS(log, in_f)) + +DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); +DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel); +DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip); + +DECLARE_EVENT_CLASS(xfs_discard_class, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(mp, agno, agbno, len), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u\n", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len) +) + +#define DEFINE_DISCARD_EVENT(name) \ +DEFINE_EVENT(xfs_discard_class, name, \ + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ + xfs_agblock_t agbno, xfs_extlen_t len), \ + TP_ARGS(mp, agno, agbno, len)) +DEFINE_DISCARD_EVENT(xfs_discard_extent); +DEFINE_DISCARD_EVENT(xfs_discard_toosmall); +DEFINE_DISCARD_EVENT(xfs_discard_exclude); +DEFINE_DISCARD_EVENT(xfs_discard_busy); + +#endif /* _TRACE_XFS_H 
*/
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE xfs_trace
+#include <trace/define_trace.h>
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
new file mode 100644
index 0000000..4d00ee6
--- /dev/null
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -0,0 +1,890 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_inode.h"
+#include "xfs_itable.h"
+#include "xfs_bmap.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_priv.h"
+#include "xfs_qm.h"
+
+STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
+
+/*
+ * Add the locked dquot to the transaction.
+ * The dquot must be locked, and it cannot be associated with any
+ * transaction.
+ */
+void
+xfs_trans_dqjoin(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp)
+{
+	ASSERT(dqp->q_transp != tp);
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+	ASSERT(dqp->q_logitem.qli_dquot == dqp);
+
+	/*
+	 * Get a log_item_desc to point at the new item.
+	 */
+	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
+
+	/*
+	 * Initialize d_transp so we can later determine if this dquot is
+	 * associated with this transaction.
+	 */
+	dqp->q_transp = tp;
+}
+
+
+/*
+ * This is called to mark the dquot as needing
+ * to be logged when the transaction is committed.  The dquot must
+ * already be associated with the given transaction.
+ * Note that it marks the entire transaction as dirty.  In the ordinary
+ * case, this gets called via xfs_trans_commit, after the transaction
+ * is already dirty.  However, there is nothing stopping this from getting
+ * called directly, as done by xfs_qm_scall_setqlim.  Hence, the TRANS_DIRTY
+ * flag.
+ */
+void
+xfs_trans_log_dquot(
+	xfs_trans_t	*tp,
+	xfs_dquot_t	*dqp)
+{
+	ASSERT(dqp->q_transp == tp);
+	ASSERT(XFS_DQ_IS_LOCKED(dqp));
+
+	tp->t_flags |= XFS_TRANS_DIRTY;
+	dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+}
+
+/*
+ * Carry forward whatever is left of the quota blk reservation to
+ * the spanking new transaction.
+ */
+void
+xfs_trans_dup_dqinfo(
+	xfs_trans_t	*otp,
+	xfs_trans_t	*ntp)
+{
+	xfs_dqtrx_t	*oq, *nq;
+	int		i, j;
+	xfs_dqtrx_t	*oqa, *nqa;
+
+	if (!otp->t_dqinfo)
+		return;
+
+	xfs_trans_alloc_dqinfo(ntp);
+	oqa = otp->t_dqinfo->dqa_usrdquots;
+	nqa = ntp->t_dqinfo->dqa_usrdquots;
+
+	/*
+	 * Because the quota blk reservation is carried forward,
+	 * it is also necessary to carry forward the DQ_DIRTY flag.
+ */ + if(otp->t_flags & XFS_TRANS_DQ_DIRTY) + ntp->t_flags |= XFS_TRANS_DQ_DIRTY; + + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + if (oqa[i].qt_dquot == NULL) + break; + oq = &oqa[i]; + nq = &nqa[i]; + + nq->qt_dquot = oq->qt_dquot; + nq->qt_bcount_delta = nq->qt_icount_delta = 0; + nq->qt_rtbcount_delta = 0; + + /* + * Transfer whatever is left of the reservations. + */ + nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used; + oq->qt_blk_res = oq->qt_blk_res_used; + + nq->qt_rtblk_res = oq->qt_rtblk_res - + oq->qt_rtblk_res_used; + oq->qt_rtblk_res = oq->qt_rtblk_res_used; + + nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; + oq->qt_ino_res = oq->qt_ino_res_used; + + } + oqa = otp->t_dqinfo->dqa_grpdquots; + nqa = ntp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * Wrap around mod_dquot to account for both user and group quotas. + */ +void +xfs_trans_mod_dquot_byino( + xfs_trans_t *tp, + xfs_inode_t *ip, + uint field, + long delta) +{ + xfs_mount_t *mp = tp->t_mountp; + + if (!XFS_IS_QUOTA_RUNNING(mp) || + !XFS_IS_QUOTA_ON(mp) || + ip->i_ino == mp->m_sb.sb_uquotino || + ip->i_ino == mp->m_sb.sb_gquotino) + return; + + if (tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + + if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) + (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); + if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot) + (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); +} + +STATIC xfs_dqtrx_t * +xfs_trans_get_dqtrx( + xfs_trans_t *tp, + xfs_dquot_t *dqp) +{ + int i; + xfs_dqtrx_t *qa; + + qa = XFS_QM_ISUDQ(dqp) ? + tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots; + + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + if (qa[i].qt_dquot == NULL || + qa[i].qt_dquot == dqp) + return &qa[i]; + } + + return NULL; +} + +/* + * Make the changes in the transaction structure. + * The moral equivalent to xfs_trans_mod_sb(). + * We don't touch any fields in the dquot, so we don't care + * if it's locked or not (most of the time it won't be). + */ +void +xfs_trans_mod_dquot( + xfs_trans_t *tp, + xfs_dquot_t *dqp, + uint field, + long delta) +{ + xfs_dqtrx_t *qtrx; + + ASSERT(tp); + ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); + qtrx = NULL; + + if (tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + /* + * Find either the first free slot or the slot that belongs + * to this dquot. + */ + qtrx = xfs_trans_get_dqtrx(tp, dqp); + ASSERT(qtrx); + if (qtrx->qt_dquot == NULL) + qtrx->qt_dquot = dqp; + + switch (field) { + + /* + * regular disk blk reservation + */ + case XFS_TRANS_DQ_RES_BLKS: + qtrx->qt_blk_res += (ulong)delta; + break; + + /* + * inode reservation + */ + case XFS_TRANS_DQ_RES_INOS: + qtrx->qt_ino_res += (ulong)delta; + break; + + /* + * disk blocks used. 
+ */ + case XFS_TRANS_DQ_BCOUNT: + if (qtrx->qt_blk_res && delta > 0) { + qtrx->qt_blk_res_used += (ulong)delta; + ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used); + } + qtrx->qt_bcount_delta += delta; + break; + + case XFS_TRANS_DQ_DELBCOUNT: + qtrx->qt_delbcnt_delta += delta; + break; + + /* + * Inode Count + */ + case XFS_TRANS_DQ_ICOUNT: + if (qtrx->qt_ino_res && delta > 0) { + qtrx->qt_ino_res_used += (ulong)delta; + ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); + } + qtrx->qt_icount_delta += delta; + break; + + /* + * rtblk reservation + */ + case XFS_TRANS_DQ_RES_RTBLKS: + qtrx->qt_rtblk_res += (ulong)delta; + break; + + /* + * rtblk count + */ + case XFS_TRANS_DQ_RTBCOUNT: + if (qtrx->qt_rtblk_res && delta > 0) { + qtrx->qt_rtblk_res_used += (ulong)delta; + ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used); + } + qtrx->qt_rtbcount_delta += delta; + break; + + case XFS_TRANS_DQ_DELRTBCOUNT: + qtrx->qt_delrtb_delta += delta; + break; + + default: + ASSERT(0); + } + tp->t_flags |= XFS_TRANS_DQ_DIRTY; +} + + +/* + * Given an array of dqtrx structures, lock all the dquots associated + * and join them to the transaction, provided they have been modified. + * We know that the highest number of dquots (of one type - usr OR grp), + * involved in a transaction is 2 and that both usr and grp combined - 3. + * So, we don't attempt to make this very generic. + */ +STATIC void +xfs_trans_dqlockedjoin( + xfs_trans_t *tp, + xfs_dqtrx_t *q) +{ + ASSERT(q[0].qt_dquot != NULL); + if (q[1].qt_dquot == NULL) { + xfs_dqlock(q[0].qt_dquot); + xfs_trans_dqjoin(tp, q[0].qt_dquot); + } else { + ASSERT(XFS_QM_TRANS_MAXDQS == 2); + xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); + xfs_trans_dqjoin(tp, q[0].qt_dquot); + xfs_trans_dqjoin(tp, q[1].qt_dquot); + } +} + + +/* + * Called by xfs_trans_commit() and similar in spirit to + * xfs_trans_apply_sb_deltas(). + * Go thru all the dquots belonging to this transaction and modify the + * INCORE dquot to reflect the actual usages. + * Unreserve just the reservations done by this transaction. + * dquot is still left locked at exit. + */ +void +xfs_trans_apply_dquot_deltas( + xfs_trans_t *tp) +{ + int i, j; + xfs_dquot_t *dqp; + xfs_dqtrx_t *qtrx, *qa; + xfs_disk_dquot_t *d; + long totalbdelta; + long totalrtbdelta; + + if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY)) + return; + + ASSERT(tp->t_dqinfo); + qa = tp->t_dqinfo->dqa_usrdquots; + for (j = 0; j < 2; j++) { + if (qa[0].qt_dquot == NULL) { + qa = tp->t_dqinfo->dqa_grpdquots; + continue; + } + + /* + * Lock all of the dquots and join them to the transaction. + */ + xfs_trans_dqlockedjoin(tp, qa); + + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qtrx = &qa[i]; + /* + * The array of dquots is filled + * sequentially, not sparsely. + */ + if ((dqp = qtrx->qt_dquot) == NULL) + break; + + ASSERT(XFS_DQ_IS_LOCKED(dqp)); + ASSERT(dqp->q_transp == tp); + + /* + * adjust the actual number of blocks used + */ + d = &dqp->q_core; + + /* + * The issue here is - sometimes we don't make a blkquota + * reservation intentionally to be fair to users + * (when the amount is small). On the other hand, + * delayed allocs do make reservations, but that's + * outside of a transaction, so we have no + * idea how much was really reserved. + * So, here we've accumulated delayed allocation blks and + * non-delay blks. The assumption is that the + * delayed ones are always reserved (outside of a + * transaction), and the others may or may not have + * quota reservations. 
+ */ + totalbdelta = qtrx->qt_bcount_delta + + qtrx->qt_delbcnt_delta; + totalrtbdelta = qtrx->qt_rtbcount_delta + + qtrx->qt_delrtb_delta; +#ifdef DEBUG + if (totalbdelta < 0) + ASSERT(be64_to_cpu(d->d_bcount) >= + -totalbdelta); + + if (totalrtbdelta < 0) + ASSERT(be64_to_cpu(d->d_rtbcount) >= + -totalrtbdelta); + + if (qtrx->qt_icount_delta < 0) + ASSERT(be64_to_cpu(d->d_icount) >= + -qtrx->qt_icount_delta); +#endif + if (totalbdelta) + be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta); + + if (qtrx->qt_icount_delta) + be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); + + if (totalrtbdelta) + be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); + + /* + * Get any default limits in use. + * Start/reset the timer(s) if needed. + */ + if (d->d_id) { + xfs_qm_adjust_dqlimits(tp->t_mountp, d); + xfs_qm_adjust_dqtimers(tp->t_mountp, d); + } + + dqp->dq_flags |= XFS_DQ_DIRTY; + /* + * add this to the list of items to get logged + */ + xfs_trans_log_dquot(tp, dqp); + /* + * Take off what's left of the original reservation. + * In case of delayed allocations, there's no + * reservation that a transaction structure knows of. + */ + if (qtrx->qt_blk_res != 0) { + if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { + if (qtrx->qt_blk_res > + qtrx->qt_blk_res_used) + dqp->q_res_bcount -= (xfs_qcnt_t) + (qtrx->qt_blk_res - + qtrx->qt_blk_res_used); + else + dqp->q_res_bcount -= (xfs_qcnt_t) + (qtrx->qt_blk_res_used - + qtrx->qt_blk_res); + } + } else { + /* + * These blks were never reserved, either inside + * a transaction or outside one (in a delayed + * allocation). Also, this isn't always a + * negative number since we sometimes + * deliberately skip quota reservations. + */ + if (qtrx->qt_bcount_delta) { + dqp->q_res_bcount += + (xfs_qcnt_t)qtrx->qt_bcount_delta; + } + } + /* + * Adjust the RT reservation. + */ + if (qtrx->qt_rtblk_res != 0) { + if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) { + if (qtrx->qt_rtblk_res > + qtrx->qt_rtblk_res_used) + dqp->q_res_rtbcount -= (xfs_qcnt_t) + (qtrx->qt_rtblk_res - + qtrx->qt_rtblk_res_used); + else + dqp->q_res_rtbcount -= (xfs_qcnt_t) + (qtrx->qt_rtblk_res_used - + qtrx->qt_rtblk_res); + } + } else { + if (qtrx->qt_rtbcount_delta) + dqp->q_res_rtbcount += + (xfs_qcnt_t)qtrx->qt_rtbcount_delta; + } + + /* + * Adjust the inode reservation. + */ + if (qtrx->qt_ino_res != 0) { + ASSERT(qtrx->qt_ino_res >= + qtrx->qt_ino_res_used); + if (qtrx->qt_ino_res > qtrx->qt_ino_res_used) + dqp->q_res_icount -= (xfs_qcnt_t) + (qtrx->qt_ino_res - + qtrx->qt_ino_res_used); + } else { + if (qtrx->qt_icount_delta) + dqp->q_res_icount += + (xfs_qcnt_t)qtrx->qt_icount_delta; + } + + ASSERT(dqp->q_res_bcount >= + be64_to_cpu(dqp->q_core.d_bcount)); + ASSERT(dqp->q_res_icount >= + be64_to_cpu(dqp->q_core.d_icount)); + ASSERT(dqp->q_res_rtbcount >= + be64_to_cpu(dqp->q_core.d_rtbcount)); + } + /* + * Do the group quotas next + */ + qa = tp->t_dqinfo->dqa_grpdquots; + } +} + +/* + * Release the reservations, and adjust the dquots accordingly. + * This is called only when the transaction is being aborted. If by + * any chance we have done dquot modifications incore (ie. deltas) already, + * we simply throw those away, since that's the expected behavior + * when a transaction is curtailed without a commit. 
+ */ +void +xfs_trans_unreserve_and_mod_dquots( + xfs_trans_t *tp) +{ + int i, j; + xfs_dquot_t *dqp; + xfs_dqtrx_t *qtrx, *qa; + boolean_t locked; + + if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) + return; + + qa = tp->t_dqinfo->dqa_usrdquots; + + for (j = 0; j < 2; j++) { + for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + qtrx = &qa[i]; + /* + * We assume that the array of dquots is filled + * sequentially, not sparsely. + */ + if ((dqp = qtrx->qt_dquot) == NULL) + break; + /* + * Unreserve the original reservation. We don't care + * about the number of blocks used field, or deltas. + * Also we don't bother to zero the fields. + */ + locked = B_FALSE; + if (qtrx->qt_blk_res) { + xfs_dqlock(dqp); + locked = B_TRUE; + dqp->q_res_bcount -= + (xfs_qcnt_t)qtrx->qt_blk_res; + } + if (qtrx->qt_ino_res) { + if (!locked) { + xfs_dqlock(dqp); + locked = B_TRUE; + } + dqp->q_res_icount -= + (xfs_qcnt_t)qtrx->qt_ino_res; + } + + if (qtrx->qt_rtblk_res) { + if (!locked) { + xfs_dqlock(dqp); + locked = B_TRUE; + } + dqp->q_res_rtbcount -= + (xfs_qcnt_t)qtrx->qt_rtblk_res; + } + if (locked) + xfs_dqunlock(dqp); + + } + qa = tp->t_dqinfo->dqa_grpdquots; + } +} + +STATIC void +xfs_quota_warn( + struct xfs_mount *mp, + struct xfs_dquot *dqp, + int type) +{ + /* no warnings for project quotas - we just return ENOSPC later */ + if (dqp->dq_flags & XFS_DQ_PROJ) + return; + quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA, + be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev, + type); +} + +/* + * This reserves disk blocks and inodes against a dquot. + * Flags indicate if the dquot is to be locked here and also + * if the blk reservation is for RT or regular blocks. + * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. + */ +STATIC int +xfs_trans_dqresv( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dquot_t *dqp, + long nblks, + long ninos, + uint flags) +{ + xfs_qcnt_t hardlimit; + xfs_qcnt_t softlimit; + time_t timer; + xfs_qwarncnt_t warns; + xfs_qwarncnt_t warnlimit; + xfs_qcnt_t count; + xfs_qcnt_t *resbcountp; + xfs_quotainfo_t *q = mp->m_quotainfo; + + + xfs_dqlock(dqp); + + if (flags & XFS_TRANS_DQ_RES_BLKS) { + hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); + if (!hardlimit) + hardlimit = q->qi_bhardlimit; + softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit); + if (!softlimit) + softlimit = q->qi_bsoftlimit; + timer = be32_to_cpu(dqp->q_core.d_btimer); + warns = be16_to_cpu(dqp->q_core.d_bwarns); + warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit; + resbcountp = &dqp->q_res_bcount; + } else { + ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); + hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit); + if (!hardlimit) + hardlimit = q->qi_rtbhardlimit; + softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit); + if (!softlimit) + softlimit = q->qi_rtbsoftlimit; + timer = be32_to_cpu(dqp->q_core.d_rtbtimer); + warns = be16_to_cpu(dqp->q_core.d_rtbwarns); + warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit; + resbcountp = &dqp->q_res_rtbcount; + } + + if ((flags & XFS_QMOPT_FORCE_RES) == 0 && + dqp->q_core.d_id && + ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || + (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && + (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { + if (nblks > 0) { + /* + * dquot is locked already. See if we'd go over the + * hardlimit or exceed the timelimit if we allocate + * nblks. 
+ */ + if (hardlimit > 0ULL && + hardlimit <= nblks + *resbcountp) { + xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); + goto error_return; + } + if (softlimit > 0ULL && + softlimit <= nblks + *resbcountp) { + if ((timer != 0 && get_seconds() > timer) || + (warns != 0 && warns >= warnlimit)) { + xfs_quota_warn(mp, dqp, + QUOTA_NL_BSOFTLONGWARN); + goto error_return; + } + + xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN); + } + } + if (ninos > 0) { + count = be64_to_cpu(dqp->q_core.d_icount); + timer = be32_to_cpu(dqp->q_core.d_itimer); + warns = be16_to_cpu(dqp->q_core.d_iwarns); + warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; + hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); + if (!hardlimit) + hardlimit = q->qi_ihardlimit; + softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); + if (!softlimit) + softlimit = q->qi_isoftlimit; + + if (hardlimit > 0ULL && count >= hardlimit) { + xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); + goto error_return; + } + if (softlimit > 0ULL && count >= softlimit) { + if ((timer != 0 && get_seconds() > timer) || + (warns != 0 && warns >= warnlimit)) { + xfs_quota_warn(mp, dqp, + QUOTA_NL_ISOFTLONGWARN); + goto error_return; + } + xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN); + } + } + } + + /* + * Change the reservation, but not the actual usage. + * Note that q_res_bcount = q_core.d_bcount + resv + */ + (*resbcountp) += (xfs_qcnt_t)nblks; + if (ninos != 0) + dqp->q_res_icount += (xfs_qcnt_t)ninos; + + /* + * note the reservation amt in the trans struct too, + * so that the transaction knows how much was reserved by + * it against this particular dquot. + * We don't do this when we are reserving for a delayed allocation, + * because we don't have the luxury of a transaction envelope then. + */ + if (tp) { + ASSERT(tp->t_dqinfo); + ASSERT(flags & XFS_QMOPT_RESBLK_MASK); + if (nblks != 0) + xfs_trans_mod_dquot(tp, dqp, + flags & XFS_QMOPT_RESBLK_MASK, + nblks); + if (ninos != 0) + xfs_trans_mod_dquot(tp, dqp, + XFS_TRANS_DQ_RES_INOS, + ninos); + } + ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); + ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); + ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); + + xfs_dqunlock(dqp); + return 0; + +error_return: + xfs_dqunlock(dqp); + if (flags & XFS_QMOPT_ENOSPC) + return ENOSPC; + return EDQUOT; +} + + +/* + * Given dquot(s), make disk block and/or inode reservations against them. + * The fact that this does the reservation against both the usr and + * grp/prj quotas is important, because this follows a both-or-nothing + * approach. + * + * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. + * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota. + * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks + * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks + * dquots are unlocked on return, if they were not locked by caller. 
+ */ +int +xfs_trans_reserve_quota_bydquots( + xfs_trans_t *tp, + xfs_mount_t *mp, + xfs_dquot_t *udqp, + xfs_dquot_t *gdqp, + long nblks, + long ninos, + uint flags) +{ + int resvd = 0, error; + + if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) + return 0; + + if (tp && tp->t_dqinfo == NULL) + xfs_trans_alloc_dqinfo(tp); + + ASSERT(flags & XFS_QMOPT_RESBLK_MASK); + + if (udqp) { + error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, + (flags & ~XFS_QMOPT_ENOSPC)); + if (error) + return error; + resvd = 1; + } + + if (gdqp) { + error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags); + if (error) { + /* + * can't do it, so backout previous reservation + */ + if (resvd) { + flags |= XFS_QMOPT_FORCE_RES; + xfs_trans_dqresv(tp, mp, udqp, + -nblks, -ninos, flags); + } + return error; + } + } + + /* + * Didn't change anything critical, so, no need to log + */ + return 0; +} + + +/* + * Lock the dquot and change the reservation if we can. + * This doesn't change the actual usage, just the reservation. + * The inode sent in is locked. + */ +int +xfs_trans_reserve_quota_nblks( + struct xfs_trans *tp, + struct xfs_inode *ip, + long nblks, + long ninos, + uint flags) +{ + struct xfs_mount *mp = ip->i_mount; + + if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) + return 0; + if (XFS_IS_PQUOTA_ON(mp)) + flags |= XFS_QMOPT_ENOSPC; + + ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); + ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == + XFS_TRANS_DQ_RES_RTBLKS || + (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == + XFS_TRANS_DQ_RES_BLKS); + + /* + * Reserve nblks against these dquots, with trans as the mediator. + */ + return xfs_trans_reserve_quota_bydquots(tp, mp, + ip->i_udquot, ip->i_gdquot, + nblks, ninos, flags); +} + +/* + * This routine is called to allocate a quotaoff log item. + */ +xfs_qoff_logitem_t * +xfs_trans_get_qoff_item( + xfs_trans_t *tp, + xfs_qoff_logitem_t *startqoff, + uint flags) +{ + xfs_qoff_logitem_t *q; + + ASSERT(tp != NULL); + + q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags); + ASSERT(q != NULL); + + /* + * Get a log_item_desc to point at the new item. + */ + xfs_trans_add_item(tp, &q->qql_item); + return q; +} + + +/* + * This is called to mark the quotaoff logitem as needing + * to be logged when the transaction is committed. The logitem must + * already be associated with the given transaction. + */ +void +xfs_trans_log_quotaoff_item( + xfs_trans_t *tp, + xfs_qoff_logitem_t *qlp) +{ + tp->t_flags |= XFS_TRANS_DIRTY; + qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY; +} + +STATIC void +xfs_trans_alloc_dqinfo( + xfs_trans_t *tp) +{ + tp->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP); +} + +void +xfs_trans_free_dqinfo( + xfs_trans_t *tp) +{ + if (!tp->t_dqinfo) + return; + kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo); + tp->t_dqinfo = NULL; +} diff --git a/fs/xfs/xfs_vnode.h b/fs/xfs/xfs_vnode.h new file mode 100644 index 0000000..7c220b4 --- /dev/null +++ b/fs/xfs/xfs_vnode.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_VNODE_H__ +#define __XFS_VNODE_H__ + +#include "xfs_fs.h" + +struct file; +struct xfs_inode; +struct xfs_iomap; +struct attrlist_cursor_kern; + +/* + * Return values for xfs_inactive. A return value of + * VN_INACTIVE_NOCACHE implies that the file system behavior + * has disassociated its state and bhv_desc_t from the vnode. + */ +#define VN_INACTIVE_CACHE 0 +#define VN_INACTIVE_NOCACHE 1 + +/* + * Flags for read/write calls - same values as IRIX + */ +#define IO_ISDIRECT 0x00004 /* bypass page cache */ +#define IO_INVIS 0x00020 /* don't update inode timestamps */ + +#define XFS_IO_FLAGS \ + { IO_ISDIRECT, "DIRECT" }, \ + { IO_INVIS, "INVIS"} + +/* + * Flush/Invalidate options for vop_toss/flush/flushinval_pages. + */ +#define FI_NONE 0 /* none */ +#define FI_REMAPF 1 /* Do a remapf prior to the operation */ +#define FI_REMAPF_LOCKED 2 /* Do a remapf prior to the operation. + Prevent VM access to the pages until + the operation completes. */ + +/* + * Some useful predicates. + */ +#define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) +#define VN_CACHED(vp) (vp->i_mapping->nrpages) +#define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \ + PAGECACHE_TAG_DIRTY) + + +#endif /* __XFS_VNODE_H__ */ diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c new file mode 100644 index 0000000..87d3e03 --- /dev/null +++ b/fs/xfs/xfs_xattr.c @@ -0,0 +1,241 @@ +/* + * Copyright (C) 2008 Christoph Hellwig. + * Portions Copyright (C) 2000-2008 Silicon Graphics, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "xfs.h" +#include "xfs_da_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_inode.h" +#include "xfs_attr.h" +#include "xfs_attr_leaf.h" +#include "xfs_acl.h" +#include "xfs_vnodeops.h" + +#include +#include + + +static int +xfs_xattr_get(struct dentry *dentry, const char *name, + void *value, size_t size, int xflags) +{ + struct xfs_inode *ip = XFS_I(dentry->d_inode); + int error, asize = size; + + if (strcmp(name, "") == 0) + return -EINVAL; + + /* Convert Linux syscall to XFS internal ATTR flags */ + if (!size) { + xflags |= ATTR_KERNOVAL; + value = NULL; + } + + error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); + if (error) + return error; + return asize; +} + +static int +xfs_xattr_set(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags, int xflags) +{ + struct xfs_inode *ip = XFS_I(dentry->d_inode); + + if (strcmp(name, "") == 0) + return -EINVAL; + + /* Convert Linux syscall to XFS internal ATTR flags */ + if (flags & XATTR_CREATE) + xflags |= ATTR_CREATE; + if (flags & XATTR_REPLACE) + xflags |= ATTR_REPLACE; + + if (!value) + return -xfs_attr_remove(ip, (unsigned char *)name, xflags); + return -xfs_attr_set(ip, (unsigned char *)name, + (void *)value, size, xflags); +} + +static const struct xattr_handler xfs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .flags = 0, /* no flags implies user namespace */ + .get = xfs_xattr_get, + .set = xfs_xattr_set, +}; + +static const struct xattr_handler xfs_xattr_trusted_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .flags = ATTR_ROOT, + .get = xfs_xattr_get, + .set = xfs_xattr_set, +}; + +static const struct xattr_handler xfs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .flags = ATTR_SECURE, + .get = xfs_xattr_get, + .set = xfs_xattr_set, +}; + +const struct xattr_handler *xfs_xattr_handlers[] = { + &xfs_xattr_user_handler, + &xfs_xattr_trusted_handler, + &xfs_xattr_security_handler, +#ifdef CONFIG_XFS_POSIX_ACL + &xfs_xattr_acl_access_handler, + &xfs_xattr_acl_default_handler, +#endif + NULL +}; + +static unsigned int xfs_xattr_prefix_len(int flags) +{ + if (flags & XFS_ATTR_SECURE) + return sizeof("security"); + else if (flags & XFS_ATTR_ROOT) + return sizeof("trusted"); + else + return sizeof("user"); +} + +static const char *xfs_xattr_prefix(int flags) +{ + if (flags & XFS_ATTR_SECURE) + return xfs_xattr_security_handler.prefix; + else if (flags & XFS_ATTR_ROOT) + return xfs_xattr_trusted_handler.prefix; + else + return xfs_xattr_user_handler.prefix; +} + +static int +xfs_xattr_put_listent( + struct xfs_attr_list_context *context, + int flags, + unsigned char *name, + int namelen, + int valuelen, + unsigned char *value) +{ + unsigned int prefix_len = xfs_xattr_prefix_len(flags); + char *offset; + int arraytop; + + ASSERT(context->count >= 0); + + /* + * Only show root namespace entries if we are actually allowed to + * see them. 
+ */ + if ((flags & XFS_ATTR_ROOT) && !capable(CAP_SYS_ADMIN)) + return 0; + + arraytop = context->count + prefix_len + namelen + 1; + if (arraytop > context->firstu) { + context->count = -1; /* insufficient space */ + return 1; + } + offset = (char *)context->alist + context->count; + strncpy(offset, xfs_xattr_prefix(flags), prefix_len); + offset += prefix_len; + strncpy(offset, (char *)name, namelen); /* real name */ + offset += namelen; + *offset = '\0'; + context->count += prefix_len + namelen + 1; + return 0; +} + +static int +xfs_xattr_put_listent_sizes( + struct xfs_attr_list_context *context, + int flags, + unsigned char *name, + int namelen, + int valuelen, + unsigned char *value) +{ + context->count += xfs_xattr_prefix_len(flags) + namelen + 1; + return 0; +} + +static int +list_one_attr(const char *name, const size_t len, void *data, + size_t size, ssize_t *result) +{ + char *p = data + *result; + + *result += len; + if (!size) + return 0; + if (*result > size) + return -ERANGE; + + strcpy(p, name); + return 0; +} + +ssize_t +xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size) +{ + struct xfs_attr_list_context context; + struct attrlist_cursor_kern cursor = { 0 }; + struct inode *inode = dentry->d_inode; + int error; + + /* + * First read the regular on-disk attributes. + */ + memset(&context, 0, sizeof(context)); + context.dp = XFS_I(inode); + context.cursor = &cursor; + context.resynch = 1; + context.alist = data; + context.bufsize = size; + context.firstu = context.bufsize; + + if (size) + context.put_listent = xfs_xattr_put_listent; + else + context.put_listent = xfs_xattr_put_listent_sizes; + + xfs_attr_list_int(&context); + if (context.count < 0) + return -ERANGE; + + /* + * Then add the two synthetic ACL attributes. + */ + if (posix_acl_access_exists(inode)) { + error = list_one_attr(POSIX_ACL_XATTR_ACCESS, + strlen(POSIX_ACL_XATTR_ACCESS) + 1, + data, size, &context.count); + if (error) + return error; + } + + if (posix_acl_default_exists(inode)) { + error = list_one_attr(POSIX_ACL_XATTR_DEFAULT, + strlen(POSIX_ACL_XATTR_DEFAULT) + 1, + data, size, &context.count); + if (error) + return error; + } + + return context.count; +} diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 3b8e028..e8bffbe 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1,6 +1,6 @@ #include #include -#include "../fs/xfs/linux-2.6/xfs_sysctl.h" +#include "../fs/xfs/xfs_sysctl.h" #include #include #include diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index 4e4932a..362da65 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c @@ -1,6 +1,6 @@ #include #include -#include "../fs/xfs/linux-2.6/xfs_sysctl.h" +#include "../fs/xfs/xfs_sysctl.h" #include #include #include -- cgit v0.10.2 From 63d635b21c00069b5ade7640bcbe8ab912dc65d1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 2 Jun 2011 04:29:23 +0000 Subject: ixgbe: Fix FCOE memory leak for DDP packets This patch is meant to fix a memory leak found via code review for FCOE. Specifically on DDP flows the SKBs were being dropped without being recycled, freed, or given to the stack. 
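The invariant the fix restores is that every skb taken off the ring is either handed up the stack or explicitly freed. In sketch form (abbreviated from the hunk below; error and end-of-packet handling omitted):

	ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, staterr);
	if (!ddp_bytes) {
		/* payload was placed directly by DDP; nothing is left
		 * for the stack, so the skb must be freed here */
		dev_kfree_skb_any(skb);
		goto next_desc;
	}
	/* otherwise hand the remaining data up as usual */
	ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);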
Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index e86297b..2279039 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1459,8 +1459,10 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, staterr); - if (!ddp_bytes) + if (!ddp_bytes) { + dev_kfree_skb_any(skb); goto next_desc; + } } #endif /* IXGBE_FCOE */ ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); -- cgit v0.10.2 From 1d2101a712b3b7281a19ff6d7bfc16c2ce9d3998 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 22 Jul 2011 06:21:56 +0000 Subject: e1000e: Spurious interrupts & dropped packets with 82577/8/9 in half-duplex On 82577/8/9 in half-duplex when a received packet is passed from the PHY to the MAC, if too many preamble octets are stripped from the packet before arriving at the MAC, it can be misinterpreted as an in-band message rather than an actual frame. For example, if the frame contents resembled an interrupt request in-band message, it would trigger a false interrupt. In most cases, the packet is just dropped. By reducing the number of preamble octets stripped from the beginning of the frame when passing it from the PHY to the MAC, the MAC will interpret the frame properly. Additional uses of the magic PHY_REG(770, 16) have been updated with a define introduced with this patch. Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 4e36978..7525e37 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -163,6 +163,11 @@ #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) #define HV_KMRN_MDIO_SLOW 0x0400 +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { @@ -657,6 +662,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; + u16 phy_reg; /* * We only want to go out to the PHY registers to see if Auto-Neg @@ -689,16 +695,35 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) mac->get_link_status = false; - if (hw->phy.type == e1000_phy_82578) { - ret_val = e1000_link_stall_workaround_hv(hw); - if (ret_val) - goto out; - } - - if (hw->mac.type == e1000_pch2lan) { + switch (hw->mac.type) { + case e1000_pch2lan: ret_val = e1000_k1_workaround_lv(hw); if (ret_val) goto out; + /* fall-thru */ + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + goto out; + } + + /* + * Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. 
+ */ + e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; } /* @@ -1355,7 +1380,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) return ret_val; /* Preamble tuning for SSC */ - ret_val = e1e_wphy(hw, PHY_REG(770, 16), 0xA204); + ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); if (ret_val) return ret_val; } -- cgit v0.10.2 From 0ed013e28fe853244f4972cf18d8e2bd62eeb8fc Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 29 Jul 2011 05:52:56 +0000 Subject: e1000e: workaround for packet drop on 82579 at 100Mbps The MAC can drop short packets when the PHY detects noise on the line at 100Mbps due to a timing issue. Work around the issue by increasing the PLL counter so the PHY properly recognizes the synchronization pattern from the MAC. Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 7525e37..46a5277 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -137,8 +137,9 @@ #define HV_PM_CTRL PHY_REG(770, 17) /* PHY Low Power Idle Control */ -#define I82579_LPI_CTRL PHY_REG(772, 20) -#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 /* EMI Registers */ #define I82579_EMI_ADDR 0x10 @@ -1670,6 +1671,7 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) s32 ret_val = 0; u16 status_reg = 0; u32 mac_reg; + u16 phy_reg; if (hw->mac.type != e1000_pch2lan) goto out; @@ -1684,12 +1686,19 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) mac_reg = er32(FEXTNVM4); mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; - if (status_reg & HV_M_STATUS_SPEED_1000) + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + goto out; + + if (status_reg & HV_M_STATUS_SPEED_1000) { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; - else + phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + } else { mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; - + phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + } ew32(FEXTNVM4, mac_reg); + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); } out: -- cgit v0.10.2 From c6e7f51e73c1bc6044bce989ec503ef2e4758d55 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Fri, 29 Jul 2011 05:53:02 +0000 Subject: e1000e: workaround invalid Tx/Rx tail descriptor register write When the Manageability Engine (ME) is enabled on 82579, it periodically accesses some MAC CSR registers. There is an arbiter in hardware which prevents simultaneous access of these registers by the host software, i.e. the driver. There is a hardware bug in the arbiter that signals a host access of the registers later than it actually happens. A write of the Transmit or Receive Descriptor Tail register could result in an incorrect value if the driver and ME perform simultaneous accesses which could result in an access to an invalid memory address. This would return an Unsupported Request which could hang the hardware. Work around the issue by checking the FWSM register bit 24 which is set by ME before it accesses the MAC CSR registers. 
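In outline, the helper added below polls that bit until it clears (bounded by a retry count), performs the tail write, and reads the register back to detect a write that did not land:

	/* outline of e1000e_update_tail_wa() as added below */
	unsigned int j = 0;

	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
		udelay(50);

	writel(i, tail);

	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
		return E1000_ERR_SWFW_SYNC;	/* caller schedules a reset */

If the verification fails, the callers disable the affected ring and schedule a full reset rather than run with an invalid tail value.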
Signed-off-by: Bruce Allan Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 35916f4..8533ad7 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -155,6 +155,9 @@ struct e1000_info; #define HV_M_STATUS_SPEED_1000 0x0200 #define HV_M_STATUS_LINK_UP 0x0040 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + /* Time to wait before putting the device into D3 if there's no link (in ms). */ #define LINK_TIMEOUT 100 @@ -454,6 +457,7 @@ struct e1000_info { #define FLAG2_DISABLE_AIM (1 << 8) #define FLAG2_CHECK_PHY_HANG (1 << 9) #define FLAG2_NO_DISABLE_RX (1 << 10) +#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 46a5277..54add27 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -814,6 +814,11 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) (adapter->hw.phy.type == e1000_phy_igp_3)) adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + /* Enable workaround for 82579 w/ ME enabled */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; + /* Disable EEE by default until IEEE802.3az spec is finalized */ if (adapter->flags2 & FLAG2_HAS_EEE) adapter->hw.dev_spec.ich8lan.eee_disable = true; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 362f703..2198e61 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -519,6 +519,63 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, } /** + * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa() + * @hw: pointer to the HW structure + * @tail: address of tail descriptor register + * @i: value to write to tail descriptor register + * + * When updating the tail register, the ME could be accessing Host CSR + * registers at the same time. Normally, this is handled in h/w by an + * arbiter but on some parts there is a bug that acknowledges Host accesses + * later than it should which could result in the descriptor register to + * have an incorrect value. Workaround this by checking the FWSM register + * which has bit 24 set while ME is accessing Host CSR registers, wait + * if it is set and try again a number of times. 
+ **/ +static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem * tail, + unsigned int i) +{ + unsigned int j = 0; + + while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) && + (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI)) + udelay(50); + + writel(i, tail); + + if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail))) + return E1000_ERR_SWFW_SYNC; + + return 0; +} + +static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i) +{ + u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail); + struct e1000_hw *hw = &adapter->hw; + + if (e1000e_update_tail_wa(hw, tail, i)) { + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e_err("ME firmware caused invalid RDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i) +{ + u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail); + struct e1000_hw *hw = &adapter->hw; + + if (e1000e_update_tail_wa(hw, tail, i)) { + u32 tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + e_err("ME firmware caused invalid TDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +/** * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended * @adapter: address of board private structure **/ @@ -573,7 +630,10 @@ map_skb: * such as IA-64). */ wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(adapter, i); + else + writel(i, adapter->hw.hw_addr + rx_ring->tail); } i++; if (i == rx_ring->count) @@ -673,7 +733,11 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, * such as IA-64). */ wmb(); - writel(i << 1, adapter->hw.hw_addr + rx_ring->tail); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(adapter, i << 1); + else + writel(i << 1, + adapter->hw.hw_addr + rx_ring->tail); } i++; @@ -756,7 +820,10 @@ check_page: * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); - writel(i, adapter->hw.hw_addr + rx_ring->tail); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(adapter, i); + else + writel(i, adapter->hw.hw_addr + rx_ring->tail); } } @@ -4689,7 +4756,12 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, wmb(); tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); + + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(adapter, i); + else + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* * we need this if more than one processor can write to our tail * at a time, it synchronizes IO on IA64/Altix systems -- cgit v0.10.2 From aaff12039ffd812d0c8bbff50b87b6f1f09bec3e Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Sun, 7 Aug 2011 15:20:18 +0200 Subject: firewire: core: handle ack_busy when fetching the Config ROM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some older Panasonic made camcorders (Panasonic AG-EZ30 and NV-DX110, Grundig Scenos DLC 2000) reject requests with ack_busy_X if a request is sent immediately after they sent a response to a prior transaction. This causes firewire-core to fail probing of the camcorder with "giving up on config rom for node id ...". Consequently, programs like kino or dvgrab are unaware of the presence of a camcorder. 
Such transaction failures also happen with the ieee1394 driver stack (of the 2.4...2.6 kernel series until 2.6.36 inclusive) but with a lower likelihood, such that kino or dvgrab are generally able to use these camcorders via the older driver stack. The cause for firewire-ohci's or firewire-core's worse behavior is not yet known. Gap count optimization in firewire-core is not the cause. Perhaps the slightly higher latency of transaction completion in the older stack plays a role. (ieee1394: AR-resp DMA context tasklet -> packet completion kthread -> user process; firewire-core: tasklet -> user process.) This change introduces retries and delays after ack_busy_X into firewire-core's Config ROM reader, such that at least firewire-core's probing and /dev/fw* creation are successful. This still leaves the problem that userland processes are facing transaction failures. gscanbus's built-in retry routines deal with them successfully, but neither kino's nor dvgrab's ever succeed. But at least DV capture with "dvgrab -noavc -card 0" works now. Live video preview in kino works too, but not actual capture. One way to prevent Configuration ROM reading failures in application programs is to modify libraw1394 to synthesize read responses by means of firewire-core's Configuration ROM cache. This would only leave CMP and FCP transaction failures as a potential problem source for applications. Reported-and-tested-by: Thomas Seilund Reported-and-tested-by: René Fritz Signed-off-by: Stefan Richter diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 95a4714..9f661e0 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -455,15 +455,20 @@ static struct device_attribute fw_device_attributes[] = { static int read_rom(struct fw_device *device, int generation, int index, u32 *data) { - int rcode; + u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4; + int i, rcode; /* device->node_id, accessed below, must not be older than generation */ smp_rmb(); - rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST, - device->node_id, generation, device->max_speed, - (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4, - data, 4); + for (i = 10; i < 100; i += 10) { + rcode = fw_run_transaction(device->card, + TCODE_READ_QUADLET_REQUEST, device->node_id, + generation, device->max_speed, offset, data, 4); + if (rcode != RCODE_BUSY) + break; + msleep(i); + } be32_to_cpus(data); return rcode; -- cgit v0.10.2 From 441c850857148935babe000fc2ba1455fe54a6a9 Mon Sep 17 00:00:00 2001 From: Curt Wohlgemuth Date: Sat, 13 Aug 2011 11:25:18 -0400 Subject: ext4: Fix ext4_should_writeback_data() for no-journal mode ext4_should_writeback_data() had an incorrect sequence of tests to determine if it should return 0 or 1: in particular, even in no-journal mode, 0 was being returned for a non-regular-file inode. This meant that, in non-journal mode, we would use ext4_journalled_aops for directories, symlinks, and other non-regular files. However, calling journalled aop callbacks when there is no valid handle can cause problems. This would cause a kernel crash with Jan Kara's commit 2d859db3e4 ("ext4: fix data corruption in inodes with journalled data"), because we now dereference 'handle' in ext4_journalled_write_end(). I also added BUG_ONs to check for a valid handle in the obviously journal-only aops callbacks. 
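The fix is purely a reordering of the early tests so that the no-journal case is decided before the non-regular-file case; abbreviated from the hunk below, with the remaining data-mode tests unchanged:

	static inline int ext4_should_writeback_data(struct inode *inode)
	{
		if (EXT4_JOURNAL(inode) == NULL)
			return 1;	/* no journal: writeback for every inode */
		if (!S_ISREG(inode->i_mode))
			return 0;	/* journalled aops need a valid journal */
		/* ... journal-mode tests as before ... */
	}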
I tested this running xfstests with a scratch device in these modes: - no-journal - data=ordered - data=writeback - data=journal All work fine; the data=journal run has many failures and a crash in xfstests 074, but this is no different from a vanilla kernel. Signed-off-by: Curt Wohlgemuth Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index bb85757..5802fa1 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -289,10 +289,10 @@ static inline int ext4_should_order_data(struct inode *inode) static inline int ext4_should_writeback_data(struct inode *inode) { - if (!S_ISREG(inode->i_mode)) - return 0; if (EXT4_JOURNAL(inode) == NULL) return 1; + if (!S_ISREG(inode->i_mode)) + return 0; if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) return 0; if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index d47264c..ad3a7ca 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -983,6 +983,8 @@ static int ext4_journalled_write_end(struct file *file, from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; + BUG_ON(!ext4_handle_valid(handle)); + if (copied < len) { if (!PageUptodate(page)) copied = 0; @@ -1699,6 +1701,8 @@ static int __ext4_journalled_writepage(struct page *page, goto out; } + BUG_ON(!ext4_handle_valid(handle)); + ret = walk_page_buffers(handle, page_bufs, 0, len, NULL, do_journal_get_write_access); -- cgit v0.10.2 From 2581fdc810889fdea97689cb62481201d579c796 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Sat, 13 Aug 2011 12:17:13 -0400 Subject: ext4: call ext4_ioend_wait and ext4_flush_completed_IO in ext4_evict_inode Flush inode's i_completed_io_list before calling ext4_ioend_wait to prevent the following deadlock scenario: A page fault happens while some process is writing inode A. During page fault, shrink_icache_memory is called that in turn evicts another inode B. Inode B has some pending io_end work so it calls ext4_ioend_wait() that waits for inode B's i_ioend_count to become zero. However, inode B's ioend work was queued behind some of inode A's ioend work on the same cpu's ext4-dio-unwritten workqueue. As the ext4-dio-unwritten thread on that cpu is processing inode A's ioend work, it tries to grab inode A's i_mutex lock. Since the i_mutex lock of inode A is still held from before the page fault happened, we enter a deadlock. This change also moves ext4_flush_completed_IO and ext4_ioend_wait from ext4_destroy_inode() to ext4_evict_inode(). During inode deletion, ext4_evict_inode() is called before ext4_destroy_inode() and in ext4_evict_inode(), we may call ext4_truncate() without holding i_mutex lock. As a result, there is a race between flush_completed_IO that is called from ext4_ext_truncate() and ext4_end_io_work, which may cause corruption on an io_end structure. This change moves ext4_flush_completed_IO and ext4_ioend_wait from ext4_destroy_inode() to ext4_evict_inode() to resolve the race between ext4_truncate() and ext4_end_io_work during inode deletion. 
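The resulting order of operations at the top of ext4_evict_inode() is flush first (under i_mutex), then wait; in outline, from the hunk below, with the rest of the eviction path unchanged:

	void ext4_evict_inode(struct inode *inode)
	{
		trace_ext4_evict_inode(inode);

		mutex_lock(&inode->i_mutex);
		ext4_flush_completed_IO(inode);	/* drain this inode's io_end work */
		mutex_unlock(&inode->i_mutex);
		ext4_ioend_wait(inode);		/* i_ioend_count can now reach zero */
		/* ... */
	}

Flushing first processes this inode's pending io_end work directly, so the subsequent wait cannot get stuck behind another inode's work on the same workqueue.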
Signed-off-by: Jiaying Zhang Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ad3a7ca..7dd6981 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -120,6 +120,12 @@ void ext4_evict_inode(struct inode *inode) int err; trace_ext4_evict_inode(inode); + + mutex_lock(&inode->i_mutex); + ext4_flush_completed_IO(inode); + mutex_unlock(&inode->i_mutex); + ext4_ioend_wait(inode); + if (inode->i_nlink) { /* * When journalling data dirty buffers are tracked only in the diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4687fea..44d0c8d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -919,7 +919,6 @@ static void ext4_i_callback(struct rcu_head *head) static void ext4_destroy_inode(struct inode *inode) { - ext4_ioend_wait(inode); if (!list_empty(&(EXT4_I(inode)->i_orphan))) { ext4_msg(inode->i_sb, KERN_ERR, "Inode %lu (%p): orphan list check failed!", -- cgit v0.10.2 From 32c80b32c053dc52712dedac5e4d0aa7c93fc353 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Sat, 13 Aug 2011 12:30:59 -0400 Subject: ext4: Resolve the hang of direct i/o read in handling EXT4_IO_END_UNWRITTEN. Setting the EXT4_IO_END_UNWRITTEN flag and increasing i_aiodio_unwritten should be done simultaneously, since ext4_end_io_nolock always clears the flag and decreases the counter at the same time. We don't increase i_aiodio_unwritten when setting EXT4_IO_END_UNWRITTEN, so it will go negative and cause some process to wait forever. Part of the patch came from Eric in his e-mail, but it didn't actually fix the problem met by Michael. http://marc.info/?l=linux-ext4&m=131316851417460&w=2 Reported-and-Tested-by: Michael Tokarev Signed-off-by: Eric Sandeen Signed-off-by: Tao Ma Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7dd6981..762e803 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2678,8 +2678,15 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) goto out; } - io_end->flag = EXT4_IO_END_UNWRITTEN; + /* + * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now, + * but being more careful is always safe for the future change. 
+ */ inode = io_end->inode; + if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) { + io_end->flag |= EXT4_IO_END_UNWRITTEN; + atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); + } /* Add the io_end to per-inode completed io list*/ spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 430c401..78839af 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -334,8 +334,10 @@ submit_and_retry: if ((io_end->num_io_pages >= MAX_IO_PAGES) && (io_end->pages[io_end->num_io_pages-1] != io_page)) goto submit_and_retry; - if (buffer_uninit(bh)) - io->io_end->flag |= EXT4_IO_END_UNWRITTEN; + if (buffer_uninit(bh) && !(io_end->flag & EXT4_IO_END_UNWRITTEN)) { + io_end->flag |= EXT4_IO_END_UNWRITTEN; + atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); + } io->io_end->size += bh->b_size; io->io_next_block++; ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh)); -- cgit v0.10.2 From 9dd75f1f1a02d656a11a7b9b9e6c2759b9c1e946 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 13 Aug 2011 12:58:21 -0400 Subject: ext4: fix nomblk_io_submit option so it correctly converts uninit blocks Bug discovered by Jan Kara: Finally, commit 1449032be17abb69116dbc393f67ceb8bd034f92 returned back the old IO submission code but apparently it forgot to return the old handling of uninitialized buffers so we unconditionnaly call block_write_full_page() without specifying end_io function. So AFAICS we never convert unwritten extents to written in some cases. For example when I mount the fs as: mount -t ext4 -o nomblk_io_submit,dioread_nolock /dev/ubdb /mnt and do int fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0600); char buf[1024]; memset(buf, 'a', sizeof(buf)); fallocate(fd, 0, 0, 16384); write(fd, buf, sizeof(buf)); I get a file full of zeros (after remounting the filesystem so that pagecache is dropped) instead of seeing the first KB contain 'a's. Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 762e803..c4da98a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1291,7 +1291,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd, else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT)) err = ext4_bio_write_page(&io_submit, page, len, mpd->wbc); - else + else if (buffer_uninit(page_bufs)) { + ext4_set_bh_endio(page_bufs, inode); + err = block_write_full_page_endio(page, + noalloc_get_block_write, + mpd->wbc, ext4_end_io_buffer_write); + } else err = block_write_full_page(page, noalloc_get_block_write, mpd->wbc); -- cgit v0.10.2 From 4eb60d869fdad7acd098b53bfd1863c311d8933d Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 13 Aug 2011 09:02:43 -0700 Subject: Revert "iwlagn: sysfs couldn't find the priv pointer" This reverts commit cc1a93e68f6c0d736b771f0746e8e4186f483fdc. This fix introduced a bug: bad pointer in unload. Signed-off-by: Emmanuel Grumbach Signed-off-by: Wey-Yi Guy Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index 69d4ec4..fb7e436 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -134,7 +134,6 @@ static void iwl_pci_apm_config(struct iwl_bus *bus) static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) { bus->drv_data = drv_data; - pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data); } static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], @@ -455,6 +454,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } + pci_set_drvdata(pdev, bus); + bus->dev = &pdev->dev; bus->irq = pdev->irq; bus->ops = &pci_ops; @@ -493,12 +494,11 @@ static void iwl_pci_down(struct iwl_bus *bus) static void __devexit iwl_pci_remove(struct pci_dev *pdev) { - struct iwl_priv *priv = pci_get_drvdata(pdev); - void *bus_specific = priv->bus->bus_specific; + struct iwl_bus *bus = pci_get_drvdata(pdev); - iwl_remove(priv); + iwl_remove(bus->drv_data); - iwl_pci_down(bus_specific); + iwl_pci_down(bus); } #ifdef CONFIG_PM @@ -506,20 +506,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) static int iwl_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_priv *priv = pci_get_drvdata(pdev); + struct iwl_bus *bus = pci_get_drvdata(pdev); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. */ - return iwl_suspend(priv); + return iwl_suspend(bus->drv_data); } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_priv *priv = pci_get_drvdata(pdev); + struct iwl_bus *bus = pci_get_drvdata(pdev); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if @@ -532,7 +532,7 @@ static int iwl_pci_resume(struct device *device) */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - return iwl_resume(priv); + return iwl_resume(bus->drv_data); } static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); -- cgit v0.10.2 From 16a9d06c753abc44f66f88e03bbecb3f1e45d71b Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 13 Aug 2011 09:02:44 -0700 Subject: iwlagn: sysfs couldn't find the priv pointer This bug has been introduced by: d593411084a56124aa9d80aafa15db8463b2d8f7 Author: Emmanuel Grumbach Date: Mon Jul 11 10:48:51 2011 +0300 iwlagn: simplify the bus architecture Revert part of the buggy patch: dev_get_drvdata will now return iwl_priv as it did before the patch. Signed-off-by: Emmanuel Grumbach Signed-off-by: Wey-Yi Guy Signed-off-by: John W. 
Linville diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c index fb7e436..2fdbffa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c @@ -134,6 +134,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus) static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) { bus->drv_data = drv_data; + pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data); } static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], @@ -454,8 +455,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } - pci_set_drvdata(pdev, bus); - bus->dev = &pdev->dev; bus->irq = pdev->irq; bus->ops = &pci_ops; @@ -479,26 +478,22 @@ out_no_pci: return err; } -static void iwl_pci_down(struct iwl_bus *bus) -{ - struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific; - - pci_disable_msi(pci_bus->pci_dev); - pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base); - pci_release_regions(pci_bus->pci_dev); - pci_disable_device(pci_bus->pci_dev); - pci_set_drvdata(pci_bus->pci_dev, NULL); - - kfree(bus); -} - static void __devexit iwl_pci_remove(struct pci_dev *pdev) { - struct iwl_bus *bus = pci_get_drvdata(pdev); + struct iwl_priv *priv = pci_get_drvdata(pdev); + struct iwl_bus *bus = priv->bus; + struct iwl_pci_bus *pci_bus = IWL_BUS_GET_PCI_BUS(bus); + struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); - iwl_remove(bus->drv_data); + iwl_remove(priv); - iwl_pci_down(bus); + pci_disable_msi(pci_dev); + pci_iounmap(pci_dev, pci_bus->hw_base); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + pci_set_drvdata(pci_dev, NULL); + + kfree(bus); } #ifdef CONFIG_PM @@ -506,20 +501,20 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) static int iwl_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_bus *bus = pci_get_drvdata(pdev); + struct iwl_priv *priv = pci_get_drvdata(pdev); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. */ - return iwl_suspend(bus->drv_data); + return iwl_suspend(priv); } static int iwl_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); - struct iwl_bus *bus = pci_get_drvdata(pdev); + struct iwl_priv *priv = pci_get_drvdata(pdev); /* Before you put code here, think about WoWLAN. You cannot check here * whether WoWLAN is enabled or not, and your code will run even if @@ -532,7 +527,7 @@ static int iwl_pci_resume(struct device *device) */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); - return iwl_resume(bus->drv_data); + return iwl_resume(priv); } static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); -- cgit v0.10.2 From 78869618a886d33d8cdfcb78cf9b245b5250e465 Mon Sep 17 00:00:00 2001 From: Aaron Lu Date: Mon, 11 Jul 2011 13:27:11 +0800 Subject: mmc: sdhci: fix retuning timer wrongly deleted in sdhci_tasklet_finish Currently, the retuning timer for retuning mode 1 is deleted in sdhci_tasklet_finish after an mmc request completes, which means the retuning timer never triggers again. This patch fixes this problem. 
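For context, in retuning mode 1 the timer does no work itself; it only flags the host so that the next request triggers retuning, and it is re-armed when tuning is performed. Roughly, in the shape of the sdhci tuning-timer callback of this era (flag and field names may differ between kernel versions):

	static void sdhci_tuning_timer(unsigned long data)
	{
		struct sdhci_host *host = (struct sdhci_host *)data;
		unsigned long flags;

		spin_lock_irqsave(&host->lock, flags);
		host->flags |= SDHCI_NEEDS_RETUNING;	/* retune before next request */
		spin_unlock_irqrestore(&host->lock, flags);
	}

Deleting this timer on every request completion therefore silently disabled periodic retuning, which is why the del_timer() call is removed below.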
Signed-off-by: Aaron Lu Reviewed-by: Philip Rakity Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index c31a334..262985a 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1867,9 +1867,6 @@ static void sdhci_tasklet_finish(unsigned long param) del_timer(&host->timer); - if (host->version >= SDHCI_SPEC_300) - del_timer(&host->tuning_timer); - mrq = host->mrq; /* -- cgit v0.10.2 From 606a15e475880157dd2336f2dc220eacc9eaf36b Mon Sep 17 00:00:00 2001 From: Philip Rakity Date: Mon, 11 Jul 2011 14:47:54 -0700 Subject: mmc: sdhci: pxav3: controller needs 32 bit ADMA addressing Enable the quirk. (Best used in conjunction with the patch downgrading ADMA to SDMA when the transfer is not aligned.) Signed-off-by: Philip Rakity Acked-by: Zhangfei Gao Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 4198dbb..fc7e4a5 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -195,7 +195,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) clk_enable(clk); host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL - | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_32BIT_ADMA_SIZE; /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; -- cgit v0.10.2 From 7199e2b61d715c5e8901ff32513d2b80db8d3737 Mon Sep 17 00:00:00 2001 From: Jaehoon Chung Date: Tue, 12 Jul 2011 17:30:47 +0900 Subject: mmc: sdhci-s3c: add BROKEN_ADMA_ZEROLEN_DESC quirk Samsung SoCs need to set BROKEN_ADMA_ZEROLEN_DESC. (An ADMA transfer longer than 65535 bytes may end up with its length set to zero.) Signed-off-by: Jaehoon Chung Signed-off-by: Kyungmin Park Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 460ffaf..03da44a 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -502,6 +502,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) /* This host supports the Auto CMD12 */ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + /* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */ + host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC; + if (pdata->cd_type == S3C_SDHCI_CD_NONE || pdata->cd_type == S3C_SDHCI_CD_PERMANENT) host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; -- cgit v0.10.2 From d5a5bd1c3f7e8d010393530d60df8da75218a488 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 22 Jul 2011 16:13:36 +0300 Subject: mmc: mmc_test: avoid stalled file in debugfs During a card removal and insertion cycle, the test file in debugfs could be left stalled until the host driver removes it. Let's keep the file in the linked list and destroy it when the card is removed. 
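The bookkeeping behind this is one small record per created dentry, kept on a module-global list so that the removal paths can find and drop it; in outline, from the code below:

	struct mmc_test_dbgfs_file {
		struct list_head link;
		struct mmc_card *card;
		struct dentry *file;
	};

	/* on create: remember the dentry */
	df->card = card;
	df->file = file;
	list_add(&df->link, &mmc_test_file_test);

	/* on card removal or module exit: walk the list and call
	 * debugfs_remove(df->file) for every entry of that card */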
Signed-off-by: Andy Shevchenko Acked-by: Per Forlin Signed-off-by: Chris Ball diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 006a5e9..742dc98 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2900,7 +2900,7 @@ static const struct file_operations mmc_test_fops_testlist = { .release = single_release, }; -static void mmc_test_free_file_test(struct mmc_card *card) +static void mmc_test_free_dbgfs_file(struct mmc_card *card) { struct mmc_test_dbgfs_file *df, *dfs; @@ -2917,34 +2917,21 @@ static void mmc_test_free_file_test(struct mmc_card *card) mutex_unlock(&mmc_test_lock); } -static int mmc_test_register_file_test(struct mmc_card *card) +static int __mmc_test_register_dbgfs_file(struct mmc_card *card, + const char *name, mode_t mode, const struct file_operations *fops) { struct dentry *file = NULL; struct mmc_test_dbgfs_file *df; - int ret = 0; - - mutex_lock(&mmc_test_lock); - - if (card->debugfs_root) - file = debugfs_create_file("test", S_IWUSR | S_IRUGO, - card->debugfs_root, card, &mmc_test_fops_test); - - if (IS_ERR_OR_NULL(file)) { - dev_err(&card->dev, - "Can't create test. Perhaps debugfs is disabled.\n"); - ret = -ENODEV; - goto err; - } if (card->debugfs_root) - file = debugfs_create_file("testlist", S_IRUGO, - card->debugfs_root, card, &mmc_test_fops_testlist); + file = debugfs_create_file(name, mode, card->debugfs_root, + card, fops); if (IS_ERR_OR_NULL(file)) { dev_err(&card->dev, - "Can't create testlist. Perhaps debugfs is disabled.\n"); - ret = -ENODEV; - goto err; + "Can't create %s. Perhaps debugfs is disabled.\n", + name); + return -ENODEV; } df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); @@ -2952,14 +2939,31 @@ static int mmc_test_register_file_test(struct mmc_card *card) debugfs_remove(file); dev_err(&card->dev, "Can't allocate memory for internal usage.\n"); - ret = -ENOMEM; - goto err; + return -ENOMEM; } df->card = card; df->file = file; list_add(&df->link, &mmc_test_file_test); + return 0; +} + +static int mmc_test_register_dbgfs_file(struct mmc_card *card) +{ + int ret; + + mutex_lock(&mmc_test_lock); + + ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, + &mmc_test_fops_test); + if (ret) + goto err; + + ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, + &mmc_test_fops_testlist); + if (ret) + goto err; err: mutex_unlock(&mmc_test_lock); @@ -2974,7 +2978,7 @@ static int mmc_test_probe(struct mmc_card *card) if (!mmc_card_mmc(card) && !mmc_card_sd(card)) return -ENODEV; - ret = mmc_test_register_file_test(card); + ret = mmc_test_register_dbgfs_file(card); if (ret) return ret; @@ -2986,7 +2990,7 @@ static int mmc_test_probe(struct mmc_card *card) static void mmc_test_remove(struct mmc_card *card) { mmc_test_free_result(card); - mmc_test_free_file_test(card); + mmc_test_free_dbgfs_file(card); } static struct mmc_driver mmc_driver = { @@ -3006,7 +3010,7 @@ static void __exit mmc_test_exit(void) { /* Clear stalled data if card is still plugged */ mmc_test_free_result(NULL); - mmc_test_free_file_test(NULL); + mmc_test_free_dbgfs_file(NULL); mmc_unregister_driver(&mmc_driver); } -- cgit v0.10.2 From 38ca285044be88a0fb47b6eb91deeeb729435fd0 Mon Sep 17 00:00:00 2001 From: Kyungmin Park Date: Tue, 26 Jul 2011 17:12:37 +0900 Subject: mmc: core: Detect eMMC v4.5 ext_csd entries The eMMC v4.5 Spec is released now: EXT_CSD_REV Extended CSD Revision 255-7 Reserved 6 Revision 1.6 (for MMC v4.5) 5 Revision 1.5 (for MMV v4.41) ... 
Signed-off-by: Kyungmin Park Signed-off-by: Chris Ball diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index aa7d1d7..5700b1c 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -259,7 +259,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) } card->ext_csd.rev = ext_csd[EXT_CSD_REV]; - if (card->ext_csd.rev > 5) { + if (card->ext_csd.rev > 6) { printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", mmc_hostname(card->host), card->ext_csd.rev); err = -EINVAL; -- cgit v0.10.2 From 1ccd4b7bfdcfcc8cc7ffc4a9c11d3ac5b6da8ca0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Thu, 28 Jul 2011 20:55:27 +0200 Subject: mmc: cb710: fix possible pci_dev leak in cb710_pci_configure() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported-by: Julia Lawall Signed-off-by: Michał Mirosław Signed-off-by: Chris Ball diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c index efec413..68cd05b 100644 --- a/drivers/misc/cb710/core.c +++ b/drivers/misc/cb710/core.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg); static int __devinit cb710_pci_configure(struct pci_dev *pdev) { unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0); - struct pci_dev *pdev0 = pci_get_slot(pdev->bus, devfn); + struct pci_dev *pdev0; u32 val; cb710_pci_update_config_reg(pdev, 0x48, @@ -43,6 +43,7 @@ static int __devinit cb710_pci_configure(struct pci_dev *pdev) if (val & 0x80000000) return 0; + pdev0 = pci_get_slot(pdev->bus, devfn); if (!pdev0) return -ENODEV; -- cgit v0.10.2 From 9b7bbe1085eb2b0f2d5d81f4116772cb2af497a4 Mon Sep 17 00:00:00 2001 From: Shashidhar Hiremath Date: Fri, 29 Jul 2011 08:49:50 -0400 Subject: mmc: dw_mmc: Fix mask in IDMAC_SET_BUFFER1_SIZE macro The mask used inside this macro was assuming Buffer_Size1's [BS1's] width to be 14 bits, it is actually 13 bits. Modify masks used in IDMAC_SET_BUFFER1_SIZE such that they use only 13 bits instead of current 14. Signed-off-by: Shashidhar Hiremath Acked-by: Will Newton Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 77f0b6b..f13bb49 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -62,7 +62,7 @@ struct idmac_desc { u32 des1; /* Buffer sizes */ #define IDMAC_SET_BUFFER1_SIZE(d, s) \ - ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff)) + ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) u32 des2; /* buffer 1 physical address */ -- cgit v0.10.2 From 55156d240a4d41d47310278c5139e24517f1c65b Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 29 Jul 2011 15:35:00 +0100 Subject: mmc: sdhci-s3c: Fix build for header change A header change has removed an implicit inclusion of module.h, breaking the build due to the use of THIS_MODULE. Fix that. Signed-off-by: Mark Brown Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 03da44a..2bd7bf4 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -19,6 +19,7 @@ #include #include #include +#include #include -- cgit v0.10.2 From 0d58864bf3472f8390e0c0a33bd875c7eec868bd Mon Sep 17 00:00:00 2001 From: Tony Lin Date: Thu, 11 Aug 2011 16:45:59 -0400 Subject: mmc: esdhc-imx: fix card interrupt loss on freescale eSDHC Apply a workaround for the imx eSDHC controller to avoid missing card interrupts. This makes SDIO work. 
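The core of the workaround is to clear and then set the D3CD bit in the host control register, which makes the controller re-sample the card interrupt line. In outline (a condensed sketch of the sequence the patch below adds to the register write path):

	u32 ctrl = readl(host->ioaddr + SDHCI_HOST_CONTROL);

	ctrl &= ~SDHCI_CTRL_D3CD;	/* clear D3CD ... */
	writel(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_D3CD;	/* ... then set it again to re-sample */
	writel(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);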
Signed-off-by: Tony Lin Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 9ebfb4b..0e9780f 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -27,6 +27,7 @@ #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" +#define SDHCI_CTRL_D3CD 0x08 /* VENDOR SPEC register */ #define SDHCI_VENDOR_SPEC 0xC0 #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 @@ -141,13 +142,32 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; struct esdhc_platform_data *boarddata = &imx_data->boarddata; - - if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) - && (boarddata->cd_type == ESDHC_CD_GPIO))) - /* - * these interrupts won't work with a custom card_detect gpio - */ - val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + u32 data; + + if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { + if (boarddata->cd_type == ESDHC_CD_GPIO) + /* + * These interrupts won't work with a custom + * card_detect gpio (only applied to mx25/35) + */ + val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + + if (val & SDHCI_INT_CARD_INT) { + /* + * Clear and then set D3CD bit to avoid missing the + * card interrupt. This is a eSDHC controller problem + * so we need to apply the following workaround: clear + * and set D3CD bit will make eSDHC re-sample the card + * interrupt. In case a card interrupt was lost, + * re-sample it by the following steps. + */ + data = readl(host->ioaddr + SDHCI_HOST_CONTROL); + data &= ~SDHCI_CTRL_D3CD; + writel(data, host->ioaddr + SDHCI_HOST_CONTROL); + data |= SDHCI_CTRL_D3CD; + writel(data, host->ioaddr + SDHCI_HOST_CONTROL); + } + } if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) @@ -217,8 +237,10 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) */ return; case SDHCI_HOST_CONTROL: - /* FSL messed up here, so we can just keep those two */ - new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); + /* FSL messed up here, so we can just keep those three */ + new_val = val & (SDHCI_CTRL_LED | \ + SDHCI_CTRL_4BITBUS | \ + SDHCI_CTRL_D3CD); /* ensure the endianess */ new_val |= ESDHC_HOST_CONTROL_LE; /* DMA mode bits are shifted */ -- cgit v0.10.2 From 4906baf080623b4971bdeeac0a9fec5b8885d3ac Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Wed, 3 Aug 2011 14:48:58 +0800 Subject: mmc: tmio: eliminate unused variable 'mmc' warning Fix below compile warning: CC drivers/mmc/host/tmio_mmc.o drivers/mmc/host/tmio_mmc.c: In function 'tmio_mmc_suspend': drivers/mmc/host/tmio_mmc.c:30: warning: unused variable 'mmc' drivers/mmc/host/tmio_mmc.c: In function 'tmio_mmc_resume': drivers/mmc/host/tmio_mmc.c:45: warning: unused variable 'mmc' Signed-off-by: Axel Lin Acked-by: Guennadi Liakhovetski Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 8d185de..44a9668 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -27,7 +27,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) { const struct mfd_cell *cell = mfd_get_cell(dev); - struct mmc_host *mmc = platform_get_drvdata(dev); int ret; ret = tmio_mmc_host_suspend(&dev->dev); @@ -42,7 +41,6 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) static int tmio_mmc_resume(struct platform_device *dev) { 
const struct mfd_cell *cell = mfd_get_cell(dev); - struct mmc_host *mmc = platform_get_drvdata(dev); int ret = 0; /* Tell the MFD core we are ready to be enabled */ -- cgit v0.10.2 From 83cbcd93a1be803ccda53e7acbdc9a937c8f6375 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 3 Aug 2011 18:35:58 +0300 Subject: mmc: Revert "mmc: sdhci: Fix SDHCI_QUIRK_TIMEOUT_USES_SDCLK" This reverts commit 4b01681c7764, which introduced a new potential divide by zero in the process of fixing one. The subsequent commits attempt to fix the issue properly. Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 262985a..11d031b 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -632,9 +632,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) target_timeout = data->timeout_ns / 1000 + data->timeout_clks / host->clock; - if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) - host->timeout_clk = host->clock / 1000; - /* * Figure out needed cycles. * We do this in steps in order to fit inside a 32 bit int. @@ -645,7 +642,6 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) * => * (1) / (2) > 2^6 */ - BUG_ON(!host->timeout_clk); count = 0; current_timeout = (1 << 13) * 1000 / host->timeout_clk; while (current_timeout < target_timeout) { @@ -2474,6 +2470,9 @@ int sdhci_add_host(struct sdhci_host *host) if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; + if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) + host->timeout_clk = host->clock / 1000; + /* * In case of Host Controller v3.00, find out whether clock * multiplier is supported. -- cgit v0.10.2 From 78a2ca2727a9b992901c715bc881b6ddb4ec6a4e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 3 Aug 2011 18:35:59 +0300 Subject: mmc: sdhci: check host->clock before using it as a denominator Sometimes host->clock could be zero which is a legal situation. This patch checks host->clock before usage as a denominator when timeout is calculated. A similar patch is applied for mmc core (see commit e9b8684, "mmc: fix division by zero in MMC core"). Without this patch, the execution of the sdhci_calc_timeout could end up with a backtrace: <0>[ 4.014319] divide error: 0000 [#1] PREEMPT SMP <4>[ 4.014352] Modules linked in: g_ether <4>[ 4.014376] <4>[ 4.014393] Pid: 33, comm: kworker/u:2 Not tainted 3.0.0+ #646 <4>[ 4.014421] EIP: 0060:[] EFLAGS: 00010046 CPU: 1 <4>[ 4.014449] EIP is at sdhci_calc_timeout+0x2e/0x100 <4>[ 4.014468] EAX: 00000000 EBX: f5930fc8 ECX: 00000000 EDX: 00000000 <4>[ 4.014488] ESI: f5291de8 EDI: f5291db8 EBP: f5291c6c ESP: f5291c50 <4>[ 4.014508] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 <0>[ 4.014529] Process kworker/u:2 (pid: 33, ti=f5290000 task=f53065a0 task.ti=f5290000) <0>[ 4.014546] Stack: <4>[ 4.014557] 00000082 c1054fdd f5291c78 04000000 f5930fc8 f5291de8 f5291db8 f5291cac <4>[ 4.014611] c12fab7c c107a98b f5291c88 c13b6d3f f593109c f5882000 f5291cac c1054fdd <4>[ 4.014663] 00000000 00000000 f5882000 00000082 f5930fc8 f5291db8 0000000a f5291ccc <0>[ 4.014716] Call Trace: <4>[ 4.014743] [] ? mod_timer+0x11d/0x380 <4>[ 4.014770] [] sdhci_prepare_data+0x2c/0x3a0 <4>[ 4.014798] [] ? trace_hardirqs_off+0xb/0x10 <4>[ 4.014827] [] ? _raw_spin_unlock_irqrestore+0x2f/0x60 <4>[ 4.014854] [] ? mod_timer+0x11d/0x380 <4>[ 4.014880] [] sdhci_send_command+0xdb/0x210 <4>[ 4.014906] [] sdhci_request+0xc3/0x150 <4>[ 4.014932] [] mmc_start_request+0xda/0x200 <4>[ 4.014960] [] ? 
__raw_spin_lock_init+0x32/0x60 <4>[ 4.014989] [] ? __init_waitqueue_head+0x35/0x50 <4>[ 4.015015] [] mmc_wait_for_req+0x7b/0x90 <4>[ 4.015045] [] mmc_send_cxd_data+0xf7/0x130 <4>[ 4.015076] [] ? mmc_erase+0x140/0x140 <4>[ 4.015102] [] mmc_send_ext_csd+0x1d/0x20 <4>[ 4.015125] [] mmc_get_ext_csd+0x70/0x140 <4>[ 4.015151] [] mmc_compare_ext_csds+0x28/0x190 <4>[ 4.015176] [] mmc_init_card+0x24f/0x650 <4>[ 4.015201] [] ? _raw_spin_unlock_irqrestore+0x4d/0x60 <4>[ 4.015226] [] ? trace_hardirqs_on_caller+0x11c/0x160 <4>[ 4.015255] [] mmc_attach_mmc+0xa4/0x190 <4>[ 4.015282] [] mmc_rescan+0x210/0x240 <4>[ 4.015311] [] process_one_work+0x176/0x550 <4>[ 4.015336] [] ? process_one_work+0xfa/0x550 <4>[ 4.015360] [] ? mmc_init_erase+0x140/0x140 <4>[ 4.015385] [] worker_thread+0x12a/0x2c0 <4>[ 4.015410] [] ? manage_workers.clone.18+0x100/0x100 <4>[ 4.015437] [] kthread+0x74/0x80 <4>[ 4.015463] [] ? __init_kthread_worker+0x60/0x60 <4>[ 4.015490] [] kernel_thread_helper+0x6/0xd <0>[ 4.015507] Code: 57 89 d7 56 53 89 c3 83 ec 10 8b 40 04 8b 72 28 f6 c4 10 89 45 f0 0f 85 91 00 00 00 85 f6 0f 84 c1 00 00 00 8b 4e 04 31 d2 89 c8 73 58 ba d3 4d 62 10 89 c1 8b 06 f7 e2 c1 ea 06 01 d1 f7 45 <0>[ 4.015829] EIP: [] sdhci_calc_timeout+0x2e/0x100 SS:ESP 0068:f5291c50 Reported-by: Alexander Shishkin Signed-off-by: Andy Shevchenko Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 11d031b..89ba451 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -628,9 +628,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) /* timeout in us */ if (!data) target_timeout = cmd->cmd_timeout_ms * 1000; - else - target_timeout = data->timeout_ns / 1000 + - data->timeout_clks / host->clock; + else { + target_timeout = data->timeout_ns / 1000; + if (host->clock) + target_timeout += data->timeout_clks / host->clock; + } /* * Figure out needed cycles. -- cgit v0.10.2 From 272308caaa6c0f2b1500a3660b9fa75f17a45cc4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 3 Aug 2011 18:36:00 +0300 Subject: mmc: sdhci: move timeout_clk calculation farther down This moves the calculation below the assignment of mmc->f_max, which we need for calculating timeout_clk in the next patch in this series. Signed-off-by: Andy Shevchenko Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 89ba451..afa26bd 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2456,25 +2456,6 @@ int sdhci_add_host(struct sdhci_host *host) host->max_clk = host->ops->get_max_clock(host); } - host->timeout_clk = - (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; - if (host->timeout_clk == 0) { - if (host->ops->get_timeout_clock) { - host->timeout_clk = host->ops->get_timeout_clock(host); - } else if (!(host->quirks & - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { - printk(KERN_ERR - "%s: Hardware doesn't specify timeout clock " - "frequency.\n", mmc_hostname(mmc)); - return -ENODEV; - } - } - if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) - host->timeout_clk *= 1000; - - if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) - host->timeout_clk = host->clock / 1000; - /* * In case of Host Controller v3.00, find out whether clock * multiplier is supported. 
@@ -2507,6 +2488,25 @@ int sdhci_add_host(struct sdhci_host *host) } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + host->timeout_clk = + (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; + if (host->timeout_clk == 0) { + if (host->ops->get_timeout_clock) { + host->timeout_clk = host->ops->get_timeout_clock(host); + } else if (!(host->quirks & + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { + printk(KERN_ERR + "%s: Hardware doesn't specify timeout clock " + "frequency.\n", mmc_hostname(mmc)); + return -ENODEV; + } + } + if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) + host->timeout_clk *= 1000; + + if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) + host->timeout_clk = host->clock / 1000; + if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); else -- cgit v0.10.2 From 65be3fef930beb3e282e7f23dfba63289971430c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 3 Aug 2011 18:36:01 +0300 Subject: mmc: sdhci: use f_max instead of host->clock for timeouts When timeout_clk is calculated the host->clock could be zero. So, instead of host->clock the calculation now uses mmc->f_max. Signed-off-by: Andy Shevchenko Cc: Mark Brown Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index afa26bd..0e02cc1 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2505,12 +2505,9 @@ int sdhci_add_host(struct sdhci_host *host) host->timeout_clk *= 1000; if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) - host->timeout_clk = host->clock / 1000; + host->timeout_clk = mmc->f_max / 1000; - if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) - mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000); - else - mmc->max_discard_to = (1 << 27) / host->timeout_clk; + mmc->max_discard_to = (1 << 27) / host->timeout_clk; mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; -- cgit v0.10.2 From 7435bb7950ba8a3cbfa6d0c01e92588562533a3f Mon Sep 17 00:00:00 2001 From: Jaehoon Chung Date: Wed, 10 Aug 2011 18:46:28 +0900 Subject: mmc: core: use defined R1_STATE_PRG macro for card status Signed-off-by: Jaehoon Chung Signed-off-by: Kyungmin Park Signed-off-by: Chris Ball diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 742dc98..2bf229a 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -224,7 +224,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test, static int mmc_test_busy(struct mmc_command *cmd) { return !(cmd->resp[0] & R1_READY_FOR_DATA) || - (R1_CURRENT_STATE(cmd->resp[0]) == 7); + (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); } /* diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 89bdeae..91a0a74 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1502,7 +1502,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, goto out; } } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || - R1_CURRENT_STATE(cmd.resp[0]) == 7); + R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); out: return err; } diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 845ce7c..770c3d0 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -407,7 +407,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, break; if (mmc_host_is_spi(card->host)) break; - } while (R1_CURRENT_STATE(status) == 7); + } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); if (mmc_host_is_spi(card->host)) { if (status & R1_SPI_ILLEGAL_COMMAND) -- cgit 
v0.10.2 From 6daa777866569fc48fe3cfcd6fd01aba37ac06a5 Mon Sep 17 00:00:00 2001 From: Seungwon Jeon Date: Fri, 5 Aug 2011 12:35:03 +0900 Subject: mmc: dw_mmc: Fix DDR mode support. Host driver can't get a hint of DDR mode through ios->ddr flag anymore. ios->timing is currently used to inform DDR mode as a substitute. And capability of MMC_CAP_MMC_HIGHSPEED is added for DDR support. Signed-off-by: Seungwon Jeon Acked-by: Will Newton Signed-off-by: Chris Ball diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index f13bb49..ff0f714 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -699,7 +699,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } /* DDR mode set */ - if (ios->ddr) { + if (ios->timing == MMC_TIMING_UHS_DDR50) { regs = mci_readl(slot->host, UHS_REG); regs |= (0x1 << slot->id) << 16; mci_writel(slot->host, UHS_REG, regs); @@ -1646,7 +1646,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) mmc->caps |= MMC_CAP_4_BIT_DATA; if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) - mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; #ifdef CONFIG_MMC_DW_IDMAC mmc->max_segs = host->ring_size; -- cgit v0.10.2 From 7fd781e8f9b72544a1c7f04456eb33d5ffaed592 Mon Sep 17 00:00:00 2001 From: Jaehoon Chung Date: Mon, 8 Aug 2011 18:10:52 +0900 Subject: mmc: remove unused "ddr" parameter in struct mmc_ios "mmc: dw_mmc: Fix DDR mode support" removed the last user. Signed-off-by: Jaehoon Chung Signed-off-by: Kyungmin Park Signed-off-by: Chris Ball diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0f83858..1d09562 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -56,8 +56,6 @@ struct mmc_ios { #define MMC_TIMING_UHS_SDR104 4 #define MMC_TIMING_UHS_DDR50 5 - unsigned char ddr; /* dual data rate used */ - #define MMC_SDR_MODE 0 #define MMC_1_2V_DDR_MODE 1 #define MMC_1_8V_DDR_MODE 2 -- cgit v0.10.2 From f6957f88e59df5008f7b2169400be657f81cdb80 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Sun, 7 Aug 2011 23:15:47 +0000 Subject: vmxnet3: Don't enable vlan filters in promiscuous mode. The vmxnet3 driver enables vlan filters if filtering is enabled for any vlan. In promiscuous mode the filter table is cleared to in order to disable filtering. However, if a vlan device is subsequently created that vlan will be added to the filter, re-engaging it. As a result, not only do we not see all the vlans in promiscuous mode, we don't even see vlans for which a filter was previously created. CC: Scott J. Goldman CC: Shreyas Bhatewara CC: VMware PV-Drivers Signed-off-by: Jesse Gross Signed-off-by: Shreyas N Bhatewara Signed-off-by: David S. 
Miller diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 1cbacb3..0959583 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1929,14 +1929,17 @@ static void vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; - unsigned long flags; - VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); - spin_lock_irqsave(&adapter->cmd_lock, flags); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_VLAN_FILTERS); - spin_unlock_irqrestore(&adapter->cmd_lock, flags); + if (!(netdev->flags & IFF_PROMISC)) { + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + unsigned long flags; + + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } set_bit(vid, adapter->active_vlans); } @@ -1946,14 +1949,17 @@ static void vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; - unsigned long flags; - VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); - spin_lock_irqsave(&adapter->cmd_lock, flags); - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, - VMXNET3_CMD_UPDATE_VLAN_FILTERS); - spin_unlock_irqrestore(&adapter->cmd_lock, flags); + if (!(netdev->flags & IFF_PROMISC)) { + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + unsigned long flags; + + VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } clear_bit(vid, adapter->active_vlans); } -- cgit v0.10.2 From 75bc8ef528f7c4ea7e80384c5593487b6b3b535e Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Mon, 8 Aug 2011 02:34:07 +0000 Subject: usbnet/cdc_ncm: Don't use stack variables for DMA The cdc_ncm driver still has a few places where stack variables are passed to the cdc_ncm_do_request function. This triggers a stack trace in lib/dma-debug.c if the CONFIG_DEBUG_DMA_API option is set. Adjust these calls to pass parameters that have been allocated with kzalloc. Signed-off-by: Josh Boyer Signed-off-by: David S. 
Miller diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a03336e..f06fb78 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -228,23 +228,40 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { - struct usb_cdc_ncm_ndp_input_size ndp_in_sz; + struct usb_cdc_ncm_ndp_input_size *ndp_in_sz; + + ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL); + if (!ndp_in_sz) { + err = -ENOMEM; + goto size_err; + } + err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), USB_CDC_SET_NTB_INPUT_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, - 0, iface_no, &ndp_in_sz, 8, 1000); + 0, iface_no, ndp_in_sz, 8, 1000); + kfree(ndp_in_sz); } else { - __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + __le32 *dwNtbInMaxSize; + dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize), + GFP_KERNEL); + if (!dwNtbInMaxSize) { + err = -ENOMEM; + goto size_err; + } + *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), USB_CDC_SET_NTB_INPUT_SIZE, USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, - 0, iface_no, &dwNtbInMaxSize, 4, 1000); + 0, iface_no, dwNtbInMaxSize, 4, 1000); + kfree(dwNtbInMaxSize); } - +size_err: if (err < 0) pr_debug("Setting NTB Input Size failed\n"); } @@ -325,19 +342,29 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) /* set Max Datagram Size (MTU) */ if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { - __le16 max_datagram_size; + __le16 *max_datagram_size; u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); + + max_datagram_size = kzalloc(sizeof(*max_datagram_size), + GFP_KERNEL); + if (!max_datagram_size) { + err = -ENOMEM; + goto max_dgram_err; + } + err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), USB_CDC_GET_MAX_DATAGRAM_SIZE, USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, + 0, iface_no, max_datagram_size, 2, 1000); if (err < 0) { pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", CDC_NCM_MIN_DATAGRAM_SIZE); + kfree(max_datagram_size); } else { - ctx->max_datagram_size = le16_to_cpu(max_datagram_size); + ctx->max_datagram_size = + le16_to_cpu(*max_datagram_size); /* Check Eth descriptor value */ if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) { if (ctx->max_datagram_size > eth_max_sz) @@ -360,8 +387,10 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx) USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 0, - iface_no, &max_datagram_size, + iface_no, max_datagram_size, 2, 1000); + kfree(max_datagram_size); +max_dgram_err: if (err < 0) pr_debug("SET_MAX_DATAGRAM_SIZE failed\n"); } -- cgit v0.10.2 From 951f2f960e5bbce20309de44626cf11d17847712 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 8 Aug 2011 06:28:50 +0000 Subject: drivers/net/can/sja1000/plx_pci.c: eliminate double free In this code, the failure_cleanup label calls the function plx_pci_del_card, which frees everything in the card->net_dev array. dev is placed in this array immediately after allocation, so the two subsequent jumps to failure_cleanup should not also call free_sja1000dev, but the second one does. If plx_pci_check_sja1000 fails, then free_sja1000dev is also called on dev. Because dev is already in the card->net_dev array, this implies that when plx_pci_del_card is later called, it may get freed again. So that entry is reset to NULL after the free. 
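In sketch form, the ownership rule the fix enforces is (condensed and illustrative, not the exact driver code; 'channel_ok' is a stand-in for the probe checks):

	dev = alloc_sja1000dev(0);
	card->net_dev[i] = dev;		/* the array now owns dev */

	if (!channel_ok) {
		free_sja1000dev(dev);
		card->net_dev[i] = NULL;	/* plx_pci_del_card() must not free it again */
	}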
Finally, if there is a problem with one channel, there will be a hole in the array. card->channels counts the number of channels that have succeeded, and does not keep track of the index of the largest element in the array that is valid. So the loop in plx_pci_del_card is changed to go up to PLX_PCI_MAX_CHAN, which is only 2. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 231385b..c7f3d4e 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -408,7 +408,7 @@ static void plx_pci_del_card(struct pci_dev *pdev) struct sja1000_priv *priv; int i = 0; - for (i = 0; i < card->channels; i++) { + for (i = 0; i < PLX_PCI_MAX_CHAN; i++) { dev = card->net_dev[i]; if (!dev) continue; @@ -536,7 +536,6 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, if (err) { dev_err(&pdev->dev, "Registering device failed " "(err=%d)\n", err); - free_sja1000dev(dev); goto failure_cleanup; } @@ -549,6 +548,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev, dev_err(&pdev->dev, "Channel #%d not detected\n", i + 1); free_sja1000dev(dev); + card->net_dev[i] = NULL; } } -- cgit v0.10.2 From 6a27cdeddf48858089e3672f844615cbf0877ebf Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Tue, 9 Aug 2011 20:15:50 +0000 Subject: net: sh_eth: Fix build by forgot including linux/interrupt.h By a6b7a407865aab9f849dd99a71072b7cd1175116, remove interrupt.h from netdevice.h. But this forget to revise sh_eth. This fix the build failure. error: expected '=', ',', ';', 'asm' or '__attribute__' before 'sh_eth_interrupt' error: implicit declaration of function 'request_irq' error: 'sh_eth_interrupt' undeclared (first use in this function) error: (Each undeclared identifier is reported only once drivers/net/sh_eth.c:1386: error: for each function it appears in.) error: 'IRQF_SHARED' undeclared (first use in this function) error: implicit declaration of function 'free_irq' Signed-off-by: Nobuhiro Iwamatsu CC: Alexey Dobriyan Signed-off-by: David S. Miller diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index ad35c21..190f619 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -21,6 +21,7 @@ */ #include +#include #include #include #include -- cgit v0.10.2 From 32f7fd44ce3aae9ad204fb167d793c335608568d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 10 Aug 2011 01:19:48 +0000 Subject: gianfar: prevent buggy hw rx vlan tagging On some buggy chips, "vlan tag present" flag is set which causes packet loss. Fix this by checking if rx vlan accel is enabled in features. Reported-by: Michael Guntsche Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 2659daa..31d5c57 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2710,8 +2710,13 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, /* Tell the skb what kind of packet this is */ skb->protocol = eth_type_trans(skb, dev); - /* Set vlan tag */ - if (fcb->flags & RXFCB_VLN) + /* + * There's need to check for NETIF_F_HW_VLAN_RX here. + * Even if vlan rx accel is disabled, on some chips + * RXFCB_VLN is pseudo randomly set. 
+ */ + if (dev->features & NETIF_F_HW_VLAN_RX && + fcb->flags & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, fcb->vlctl); /* Send the packet up the stack */ -- cgit v0.10.2 From b88cf73d9278a5838e3ac2b670ab3b4ff533ea17 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 11 Aug 2011 14:39:59 +0000 Subject: net: add missing entries to Documentation/networking/00-INDEX A simple janitor duty patch that adds a one sentence overview to 00-INDEX for all files that lacked it. - does not add entries for subdirectories - does not modify existing entries. Signed-off-by: David S. Miller diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 4edd78d..811252b 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -1,13 +1,21 @@ 00-INDEX - this file +3c359.txt + - information on the 3Com TokenLink Velocity XL (3c5359) driver. 3c505.txt - information on the 3Com EtherLink Plus (3c505) driver. +3c509.txt + - information on the 3Com Etherlink III Series Ethernet cards. 6pack.txt - info on the 6pack protocol, an alternative to KISS for AX.25 DLINK.txt - info on the D-Link DE-600/DE-620 parallel port pocket adapters PLIP.txt - PLIP: The Parallel Line Internet Protocol device driver +README.ipw2100 + - README for the Intel PRO/Wireless 2100 driver. +README.ipw2200 + - README for the Intel PRO/Wireless 2915ABG and 2200BG driver. README.sb1000 - info on General Instrument/NextLevel SURFboard1000 cable modem. alias.txt @@ -20,8 +28,12 @@ atm.txt - info on where to get ATM programs and support for Linux. ax25.txt - info on using AX.25 and NET/ROM code for Linux +batman-adv.txt + - B.A.T.M.A.N routing protocol on top of layer 2 Ethernet Frames. baycom.txt - info on the driver for Baycom style amateur radio modems +bonding.txt + - Linux Ethernet Bonding Driver HOWTO: link aggregation in Linux. bridge.txt - where to get user space programs for ethernet bridging with Linux. can.txt @@ -34,32 +46,60 @@ cxacru.txt - Conexant AccessRunner USB ADSL Modem cxacru-cf.py - Conexant AccessRunner USB ADSL Modem configuration file parser +cxgb.txt + - Release Notes for the Chelsio N210 Linux device driver. +dccp.txt + - the Datagram Congestion Control Protocol (DCCP) (RFC 4340..42). de4x5.txt - the Digital EtherWORKS DE4?? and DE5?? PCI Ethernet driver decnet.txt - info on using the DECnet networking layer in Linux. depca.txt - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver +dl2k.txt + - README for D-Link DL2000-based Gigabit Ethernet Adapters (dl2k.ko). +dm9000.txt + - README for the Simtec DM9000 Network driver. dmfe.txt - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver. +dns_resolver.txt + - The DNS resolver module allows kernel servies to make DNS queries. +driver.txt + - Softnet driver issues. e100.txt - info on Intel's EtherExpress PRO/100 line of 10/100 boards e1000.txt - info on Intel's E1000 line of gigabit ethernet boards +e1000e.txt + - README for the Intel Gigabit Ethernet Driver (e1000e). eql.txt - serial IP load balancing ewrk3.txt - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver +fib_trie.txt + - Level Compressed Trie (LC-trie) notes: a structure for routing. filter.txt - Linux Socket Filtering fore200e.txt - FORE Systems PCA-200E/SBA-200E ATM NIC driver info. framerelay.txt - info on using Frame Relay/Data Link Connection Identifier (DLCI). +gen_stats.txt + - Generic networking statistics for netlink users. +generic_hdlc.txt + - The generic High Level Data Link Control (HDLC) layer. 
generic_netlink.txt - info on Generic Netlink +gianfar.txt + - Gianfar Ethernet Driver. ieee802154.txt - Linux IEEE 802.15.4 implementation, API and drivers +ifenslave.c + - Configure network interfaces for parallel routing (bonding). +igb.txt + - README for the Intel Gigabit Ethernet Driver (igb). +igbvf.txt + - README for the Intel Gigabit Ethernet Driver (igbvf). ip-sysctl.txt - /proc/sys/net/ipv4/* variables ip_dynaddr.txt @@ -68,41 +108,117 @@ ipddp.txt - AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation iphase.txt - Interphase PCI ATM (i)Chip IA Linux driver info. +ipv6.txt + - Options to the ipv6 kernel module. +ipvs-sysctl.txt + - Per-inode explanation of the /proc/sys/net/ipv4/vs interface. irda.txt - where to get IrDA (infrared) utilities and info for Linux. +ixgb.txt + - README for the Intel 10 Gigabit Ethernet Driver (ixgb). +ixgbe.txt + - README for the Intel 10 Gigabit Ethernet Driver (ixgbe). +ixgbevf.txt + - README for the Intel Virtual Function (VF) Driver (ixgbevf). +l2tp.txt + - User guide to the L2TP tunnel protocol. lapb-module.txt - programming information of the LAPB module. ltpc.txt - the Apple or Farallon LocalTalk PC card driver +mac80211-injection.txt + - HOWTO use packet injection with mac80211 multicast.txt - Behaviour of cards under Multicast +multiqueue.txt + - HOWTO for multiqueue network device support. +netconsole.txt + - The network console module netconsole.ko: configuration and notes. +netdev-features.txt + - Network interface "feature mess and how to get out from it alive". netdevices.txt - info on network device driver functions exported to the kernel. +netif-msg.txt + - Design of the network interface message level setting (NETIF_MSG_*). +nfc.txt + - The Linux Near Field Communication (NFS) subsystem. olympic.txt - IBM PCI Pit/Pit-Phy/Olympic Token Ring driver info. +operstates.txt + - Overview of network interface operational states. +packet_mmap.txt + - User guide to memory mapped packet socket rings (PACKET_[RT]X_RING). +phonet.txt + - The Phonet packet protocol used in Nokia cellular modems. +phy.txt + - The PHY abstraction layer. +pktgen.txt + - User guide to the kernel packet generator (pktgen.ko). policy-routing.txt - IP policy-based routing +ppp_generic.txt + - Information about the generic PPP driver. +proc_net_tcp.txt + - Per inode overview of the /proc/net/tcp and /proc/net/tcp6 interfaces. +radiotap-headers.txt + - Background on radiotap headers. ray_cs.txt - Raylink Wireless LAN card driver info. +rds.txt + - Background on the reliable, ordered datagram delivery method RDS. +regulatory.txt + - Overview of the Linux wireless regulatory infrastructure. +rxrpc.txt + - Guide to the RxRPC protocol. +s2io.txt + - Release notes for Neterion Xframe I/II 10GbE driver. +scaling.txt + - Explanation of network scaling techniques: RSS, RPS, RFS, aRFS, XPS. +sctp.txt + - Notes on the Linux kernel implementation of the SCTP protocol. +secid.txt + - Explanation of the secid member in flow structures. skfp.txt - SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info. smc9.txt - the driver for SMC's 9000 series of Ethernet cards smctr.txt - SMC TokenCard TokenRing Linux driver info. +spider-net.txt + - README for the Spidernet Driver (as found in PS3 / Cell BE). +stmmac.txt + - README for the STMicro Synopsys Ethernet driver. +tc-actions-env-rules.txt + - rules for traffic control (tc) actions. +timestamping.txt + - overview of network packet timestamping variants. tcp.txt - short blurb on how TCP output takes place. 
+tcp-thin.txt + - kernel tuning options for low rate 'thin' TCP streams. tlan.txt - ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info. tms380tr.txt - SysKonnect Token Ring ISA/PCI adapter driver info. +tproxy.txt + - Transparent proxy support user guide. tuntap.txt - TUN/TAP device driver, allowing user space Rx/Tx of packets. +udplite.txt + - UDP-Lite protocol (RFC 3828) introduction. vortex.txt - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards. +vxge.txt + - README for the Neterion X3100 PCIe Server Adapter. x25.txt - general info on X.25 development. x25-iface.txt - description of the X.25 Packet Layer to LAPB device interface. +xfrm_proc.txt + - description of the statistics package for XFRM. +xfrm_sync.txt + - sync patches for XFRM enable migration of an SA between hosts. +xfrm_sysctl.txt + - description of the XFRM configuration options. z8530drv.txt - info about Linux driver for Z8530 based HDLC cards for AX.25 -- cgit v0.10.2 From 320f24e482e6b390c608c6afec253405f9ab7436 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 11 Aug 2011 14:41:48 +0000 Subject: net: minor update to Documentation/networking/scaling.txt Incorporate last comments about hyperthreading, interrupt coalescing and the definition of cache domains into the network scaling document scaling.txt Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 7254b4b..58fd741 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt @@ -52,7 +52,8 @@ module parameter for specifying the number of hardware queues to configure. In the bnx2x driver, for instance, this parameter is called num_queues. A typical RSS configuration would be to have one receive queue for each CPU if the device supports enough queues, or otherwise at least -one for each cache domain at a particular cache level (L1, L2, etc.). +one for each memory domain, where a memory domain is a set of CPUs that +share a particular memory level (L1, L2, NUMA node, etc.). The indirection table of an RSS device, which resolves a queue by masked hash, is usually programmed by the driver at initialization. The @@ -82,11 +83,17 @@ RSS should be enabled when latency is a concern or whenever receive interrupt processing forms a bottleneck. Spreading load between CPUs decreases queue length. For low latency networking, the optimal setting is to allocate as many queues as there are CPUs in the system (or the -NIC maximum, if lower). Because the aggregate number of interrupts grows -with each additional queue, the most efficient high-rate configuration +NIC maximum, if lower). The most efficient high-rate configuration is likely the one with the smallest number of receive queues where no -CPU that processes receive interrupts reaches 100% utilization. Per-cpu -load can be observed using the mpstat utility. +receive queue overflows due to a saturated CPU, because in default +mode with interrupt coalescing enabled, the aggregate number of +interrupts (and thus work) grows with each additional queue. + +Per-cpu load can be observed using the mpstat utility, but note that on +processors with hyperthreading (HT), each hyperthread is represented as +a separate CPU. For interrupt handling, HT has shown no benefit in +initial tests, so limit the number of queues to the number of CPU cores +in the system. RPS: Receive Packet Steering @@ -145,7 +152,7 @@ the bitmap. 
== Suggested Configuration For a single queue device, a typical RPS configuration would be to set -the rps_cpus to the CPUs in the same cache domain of the interrupting +the rps_cpus to the CPUs in the same memory domain of the interrupting CPU. If NUMA locality is not an issue, this could also be all CPUs in the system. At high interrupt rate, it might be wise to exclude the interrupting CPU from the map since that already performs much work. @@ -154,7 +161,7 @@ For a multi-queue system, if RSS is configured so that a hardware receive queue is mapped to each CPU, then RPS is probably redundant and unnecessary. If there are fewer hardware queues than CPUs, then RPS might be beneficial if the rps_cpus for each queue are the ones that -share the same cache domain as the interrupting CPU for that queue. +share the same memory domain as the interrupting CPU for that queue. RFS: Receive Flow Steering @@ -326,7 +333,7 @@ The queue chosen for transmitting a particular flow is saved in the corresponding socket structure for the flow (e.g. a TCP connection). This transmit queue is used for subsequent packets sent on the flow to prevent out of order (ooo) packets. The choice also amortizes the cost -of calling get_xps_queues() over all packets in the connection. To avoid +of calling get_xps_queues() over all packets in the flow. To avoid ooo packets, the queue for a flow can subsequently only be changed if skb->ooo_okay is set for a packet in the flow. This flag indicates that there are no outstanding packets in the flow, so the transmit queue can -- cgit v0.10.2 From 588dc91151d99e9307c2f9a8468453274fe43ecd Mon Sep 17 00:00:00 2001 From: Wang Shaoyan Date: Thu, 11 Aug 2011 17:07:25 +0000 Subject: gianfar: reduce stack usage in gianfar_ethtool.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/net/gianfar_ethtool.c:765: warning: the frame size of 2048 bytes is larger than 1024 bytes Signed-off-by: Wang Shaoyan Reviewed-and-tested-by: Sebastian Pöhn Signed-off-by: David S. 
Miller diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 6e35069..25a8c2a 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -686,10 +686,21 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u { unsigned int last_rule_idx = priv->cur_filer_idx; unsigned int cmp_rqfpr; - unsigned int local_rqfpr[MAX_FILER_IDX + 1]; - unsigned int local_rqfcr[MAX_FILER_IDX + 1]; + unsigned int *local_rqfpr; + unsigned int *local_rqfcr; int i = 0x0, k = 0x0; int j = MAX_FILER_IDX, l = 0x0; + int ret = 1; + + local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), + GFP_KERNEL); + local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1), + GFP_KERNEL); + if (!local_rqfpr || !local_rqfcr) { + pr_err("Out of memory\n"); + ret = 0; + goto err; + } switch (class) { case TCP_V4_FLOW: @@ -706,7 +717,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u break; default: pr_err("Right now this class is not supported\n"); - return 0; + ret = 0; + goto err; } for (i = 0; i < MAX_FILER_IDX + 1; i++) { @@ -721,7 +733,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u if (i == MAX_FILER_IDX + 1) { pr_err("No parse rule found, can't create hash rules\n"); - return 0; + ret = 0; + goto err; } /* If a match was found, then it begins the starting of a cluster rule @@ -765,7 +778,10 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u priv->cur_filer_idx = priv->cur_filer_idx - 1; } - return 1; +err: + kfree(local_rqfcr); + kfree(local_rqfpr); + return ret; } static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd) -- cgit v0.10.2 From db12fb833a88c5114d70dcafebd33d460a09d593 Mon Sep 17 00:00:00 2001 From: Zac Storer Date: Sat, 13 Aug 2011 12:34:45 -0700 Subject: Documentation: fix spelling error in SubmittingPatches Fixed a spelling error. Signed-off-by: Zac Storer Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches index 569f353..4468ce2 100644 --- a/Documentation/SubmittingPatches +++ b/Documentation/SubmittingPatches @@ -303,7 +303,7 @@ patches that are being emailed around. The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to -pass it on as a open-source patch. The rules are pretty simple: if you +pass it on as an open-source patch. The rules are pretty simple: if you can certify the below: Developer's Certificate of Origin 1.1 -- cgit v0.10.2 From 3c8429ad574f2d83878438522f41c003a6cc458e Mon Sep 17 00:00:00 2001 From: Luis de Bethencourt Date: Sat, 13 Aug 2011 12:34:47 -0700 Subject: Documentation: drop Linux Source Driver from kernel-docs references Dropping LSD (Linux Source Driver) since it hasn't been available for a long time. Signed-off-by: Luis de Bethencourt Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/Documentation/kernel-docs.txt b/Documentation/kernel-docs.txt index 9a86746..0e0734b 100644 --- a/Documentation/kernel-docs.txt +++ b/Documentation/kernel-docs.txt @@ -620,17 +620,6 @@ (including this document itself) have been moved there, and might be more up to date than the web version. - * Name: "Linux Source Driver" - URL: http://lsd.linux.cz - Keywords: Browsing source code. 
- Description: "Linux Source Driver (LSD) is an application, which - can make browsing source codes of Linux kernel easier than you can - imagine. You can select between multiple versions of kernel (e.g. - 0.01, 1.0.0, 2.0.33, 2.0.34pre13, 2.0.0, 2.1.101 etc.). With LSD - you can search Linux kernel (fulltext, macros, types, functions - and variables) and LSD can generate patches for you on the fly - (files, directories or kernel)". - * Name: "Linux Kernel Source Reference" Author: Thomas Graichen. URL: http://marc.info/?l=linux-kernel&m=96446640102205&w=4 -- cgit v0.10.2 From ac1667db056a323cb0cb5d75e3bdb820804d46b6 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Sat, 13 Aug 2011 12:34:50 -0700 Subject: Documentation: add ARM user_debug to kernel-parameters.txt Usually kernel parameters are documented in kernel-parameters.txt but user_debug is only documented in the Kconfig. Document the option and point to the Kconfig help text for more info. Signed-off-by: Stephen Boyd Cc: Russell King Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 78926aa..246b132 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2635,6 +2635,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted. medium is write-protected). Example: quirks=0419:aaf5:rl,0421:0433:rc + user_debug= [KNL,ARM] + Format: + See arch/arm/Kconfig.debug help text. + 1 - undefined instruction events + 2 - system calls + 4 - invalid data aborts + 8 - SIGSEGV faults + 16 - SIGBUS faults + Example: user_debug=31 + userpte= [X86] Flags controlling user PTE allocations. -- cgit v0.10.2 From 1629024668d485d9ee8c5a6c9906b19ffd9a49d9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 13 Aug 2011 12:34:52 -0700 Subject: Documentation: kernel-parameters.txt cleanups General cleanups to kernel-parameters.txt: - add missing $ARCH that are being used/referenced - alphabetize the parameter restrictions list - spell "IA-64" as listed in arch/ia64/Kconfig instead of "IA64" - remove trailing whitespace - use hyphen in 32-bit etc. Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 246b132..6ca1f5c 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -40,6 +40,7 @@ parameter is applicable: ALSA ALSA sound support is enabled. APIC APIC support is enabled. APM Advanced Power Management support is enabled. + ARM ARM architecture is enabled. AVR32 AVR32 architecture is enabled. AX25 Appropriate AX.25 support is enabled. BLACKFIN Blackfin architecture is enabled. @@ -49,6 +50,7 @@ parameter is applicable: EFI EFI Partitioning (GPT) is enabled EIDE EIDE/ATAPI support is enabled. FB The frame buffer device is enabled. + FTRACE Function tracing enabled. GCOV GCOV profiling is enabled. HW Appropriate hardware is enabled. IA-64 IA-64 architecture is enabled. @@ -69,6 +71,7 @@ parameter is applicable: Documentation/m68k/kernel-options.txt. MCA MCA bus support is enabled. MDA MDA console support is enabled. + MIPS MIPS architecture is enabled. MOUSE Appropriate mouse support is enabled. MSI Message Signaled Interrupts (PCI). MTD MTD (Memory Technology Device) support is enabled. @@ -100,7 +103,6 @@ parameter is applicable: SPARC Sparc architecture is enabled. SWSUSP Software suspend (hibernation) is enabled. SUSPEND System suspend states are enabled. 
- FTRACE Function tracing enabled. TPM TPM drivers are enabled. TS Appropriate touchscreen support is enabled. UMS USB Mass Storage support is enabled. @@ -115,7 +117,7 @@ parameter is applicable: X86-64 X86-64 architecture is enabled. More X86-64 boot options can be found in Documentation/x86/x86_64/boot-options.txt . - X86 Either 32bit or 64bit x86 (same as X86-32+X86-64) + X86 Either 32-bit or 64-bit x86 (same as X86-32+X86-64) XEN Xen support is enabled In addition, the following text indicates that the option: @@ -376,7 +378,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. atkbd.softrepeat= [HW] Use software keyboard repeat - autotest [IA64] + autotest [IA-64] baycom_epp= [HW,AX25] Format: , @@ -681,8 +683,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. uart[8250],mmio32,[,options] Start an early, polled-mode console on the 8250/16550 UART at the specified I/O port or MMIO address. - MMIO inter-register address stride is either 8bit (mmio) - or 32bit (mmio32). + MMIO inter-register address stride is either 8-bit + (mmio) or 32-bit (mmio32). The options are the same as for ttyS, above. earlyprintk= [X86,SH,BLACKFIN] @@ -725,7 +727,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. See Documentation/block/as-iosched.txt and Documentation/block/deadline-iosched.txt for details. - elfcorehdr= [IA64,PPC,SH,X86] + elfcorehdr= [IA-64,PPC,SH,X86] Specifies physical address of start of kernel core image elf header. Generally kexec loader will pass this option to capture kernel. @@ -791,7 +793,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. tracer at boot up. function-list is a comma separated list of functions. This list can be changed at run time by the set_ftrace_filter file in the debugfs - tracing directory. + tracing directory. ftrace_notrace=[function-list] [FTRACE] Do not trace the functions specified in @@ -829,7 +831,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. hashdist= [KNL,NUMA] Large hashes allocated during boot are distributed across NUMA nodes. Defaults on - for 64bit NUMA, off otherwise. + for 64-bit NUMA, off otherwise. Format: 0 | 1 (for off | on) hcl= [IA-64] SGI's Hardware Graph compatibility layer @@ -998,10 +1000,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. DMA. forcedac [x86_64] With this option iommu will not optimize to look - for io virtual address below 32 bit forcing dual + for io virtual address below 32-bit forcing dual address cycle on pci bus for cards supporting greater - than 32 bit addressing. The default is to look - for translation below 32 bit and if not available + than 32-bit addressing. The default is to look + for translation below 32-bit and if not available then look in the higher range. strict [Default Off] With this option on every unmap_single operation will @@ -1017,7 +1019,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. off disable Interrupt Remapping nosid disable Source ID checking - inttest= [IA64] + inttest= [IA-64] iomem= Disable strict checking of access to MMIO memory strict regions from userspace. @@ -1034,7 +1036,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. nomerge forcesac soft - pt [x86, IA64] + pt [x86, IA-64] io7= [HW] IO7 for Marvel based alpha systems See comment before marvel_specify_io7 in @@ -1165,7 +1167,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. 
kvm-amd.npt= [KVM,AMD] Disable nested paging (virtualized MMU) for all guests. - Default is 1 (enabled) if in 64bit or 32bit-PAE mode + Default is 1 (enabled) if in 64-bit or 32-bit PAE mode. kvm-intel.ept= [KVM,Intel] Disable extended page tables (virtualized MMU) support on capable Intel chips. @@ -1202,10 +1204,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. libata.dma=0 Disable all PATA and SATA DMA libata.dma=1 PATA and SATA Disk DMA only libata.dma=2 ATAPI (CDROM) DMA only - libata.dma=4 Compact Flash DMA only + libata.dma=4 Compact Flash DMA only Combinations also work, so libata.dma=3 enables DMA for disks and CDROMs, but not CFs. - + libata.ignore_hpa= [LIBATA] Ignore HPA limit libata.ignore_hpa=0 keep BIOS limits (default) libata.ignore_hpa=1 ignore limits, using full disk @@ -1331,7 +1333,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. ltpc= [NET] Format: ,, - machvec= [IA64] Force the use of a particular machine-vector + machvec= [IA-64] Force the use of a particular machine-vector (machvec) in a generic kernel. Example: machvec=hpzx1_swiotlb @@ -1734,7 +1736,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. nointroute [IA-64] - nojitter [IA64] Disables jitter checking for ITC timers. + nojitter [IA-64] Disables jitter checking for ITC timers. no-kvmclock [X86,KVM] Disable paravirtualized KVM clock driver @@ -1800,7 +1802,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. nox2apic [X86-64,APIC] Do not enable x2APIC mode. - nptcg= [IA64] Override max number of concurrent global TLB + nptcg= [IA-64] Override max number of concurrent global TLB purges which is reported from either PAL_VM_SUMMARY or SAL PALO. @@ -2077,7 +2079,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Format: { parport | timid | 0 } See also Documentation/parport.txt. - pmtmr= [X86] Manual setup of pmtmr I/O Port. + pmtmr= [X86] Manual setup of pmtmr I/O Port. Override pmtimer IOPort with a hex value. e.g. pmtmr=0x508 -- cgit v0.10.2 From 6989b5bb2f0302d824bfc5a9272e17eef22353cc Mon Sep 17 00:00:00 2001 From: Paul Mcquade Date: Sat, 13 Aug 2011 12:34:54 -0700 Subject: Documentation: email-clients: Add better Thunderbird information Add better Thunderbird information. Add Thunderbird Registry instructions to: Enable UTF8 & Preformat mode Disable HTML mode Signed-off-by: Paul McQuade Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt index a0b58e2..860c29a 100644 --- a/Documentation/email-clients.txt +++ b/Documentation/email-clients.txt @@ -199,18 +199,16 @@ to coerce it into behaving. To beat some sense out of the internal editor, do this: -- Under account settings, composition and addressing, uncheck "Compose - messages in HTML format". - - Edit your Thunderbird config settings so that it won't use format=flowed. Go to "edit->preferences->advanced->config editor" to bring up the thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to "false". -- Enable "preformat" mode: Shft-click on the Write icon to bring up the HTML - composer, select "Preformat" from the drop-down box just under the subject - line, then close the message without saving. (This setting also applies to - the text composer, but the only control for it is in the HTML composer.) +- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false". 
+ +- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true". + +- Enable UTF8: Set "prefs.converted-to-utf8" to "true". - Install the "toggle wordwrap" extension. Download the file from: https://addons.mozilla.org/thunderbird/addon/2351/ -- cgit v0.10.2 From 4126dacb5b2ca85b187a27b93805254567526dc8 Mon Sep 17 00:00:00 2001 From: Sergiu Iordache Date: Sat, 13 Aug 2011 12:34:56 -0700 Subject: Documentation: add Ramoops usage description Add a documentation file describing the usage of Ramoops Signed-off-by: Sergiu Iordache Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index 1f89424..65bbd26 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX @@ -272,6 +272,8 @@ printk-formats.txt - how to get printk format specifiers right prio_tree.txt - info on radix-priority-search-tree use for indexing vmas. +ramoops.txt + - documentation of the ramoops oops/panic logging module. rbtree.txt - info on what red-black trees are and what they are for. robust-futex-ABI.txt diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt new file mode 100644 index 0000000..8fb1ba7 --- /dev/null +++ b/Documentation/ramoops.txt @@ -0,0 +1,76 @@ +Ramoops oops/panic logger +========================= + +Sergiu Iordache + +Updated: 8 August 2011 + +0. Introduction + +Ramoops is an oops/panic logger that writes its logs to RAM before the system +crashes. It works by logging oopses and panics in a circular buffer. Ramoops +needs a system with persistent RAM so that the content of that area can +survive after a restart. + +1. Ramoops concepts + +Ramoops uses a predefined memory area to store the dump. The start and size of +the memory area are set using two variables: + * "mem_address" for the start + * "mem_size" for the size. The memory size will be rounded down to a + power of two. + +The memory area is divided into "record_size" chunks (also rounded down to +power of two) and each oops/panic writes a "record_size" chunk of +information. + +Dumping both oopses and panics can be done by setting 1 in the "dump_oops" +variable while setting 0 in that variable dumps only the panics. + +The module uses a counter to record multiple dumps but the counter gets reset +on restart (i.e. new dumps after the restart will overwrite old ones). + +2. Setting the parameters + +Setting the ramoops parameters can be done in 2 different manners: + 1. Use the module parameters (which have the names of the variables described + as before). + 2. Use a platform device and set the platform data. The parameters can then + be set through that platform data. An example of doing that is: + +#include +[...] + +static struct ramoops_platform_data ramoops_data = { + .mem_size = <...>, + .mem_address = <...>, + .record_size = <...>, + .dump_oops = <...>, +}; + +static struct platform_device ramoops_dev = { + .name = "ramoops", + .dev = { + .platform_data = &ramoops_data, + }, +}; + +[... inside a function ...] +int ret; + +ret = platform_device_register(&ramoops_dev); +if (ret) { + printk(KERN_ERR "unable to register platform device\n"); + return ret; +} + +3. Dump format + +The data dump begins with a header, currently defined as "====" followed by a +timestamp and a new line. The dump then continues with the actual data. + +4. Reading the data + +The dump data can be read from memory (through /dev/mem or other means). +Getting the module parameters, which are needed in order to parse the data, can +be done through /sys/module/ramoops/parameters/* . 
-- cgit v0.10.2

From 399e1d9c22e15c1697d070bb89e6e0da3fae7e14 Mon Sep 17 00:00:00 2001
From: Ralf Thielow
Date: Sat, 13 Aug 2011 12:34:57 -0700
Subject: Documentation: SubmittingDrivers: fix Linus's git tree URL

Change resource URL to new git tree -
(http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git).

Signed-off-by: Ralf Thielow
Signed-off-by: Randy Dunlap
Signed-off-by: Linus Torvalds

diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index 319baa8..36d16bb 100644
--- a/Documentation/SubmittingDrivers
+++ b/Documentation/SubmittingDrivers
@@ -130,7 +130,7 @@ Linux kernel master tree: ftp.??.kernel.org:/pub/linux/kernel/...
 ?? == your country code, such as "us", "uk", "fr", etc.

- http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git
+ http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git

 Linux kernel mailing list:
 linux-kernel@vger.kernel.org

-- cgit v0.10.2

From 4c74916fa81ce5a431350cb27eb9a7c95d3cf3d7 Mon Sep 17 00:00:00 2001
From: Marcos Souza
Date: Sat, 13 Aug 2011 12:34:59 -0700
Subject: Documentation: befs.txt: no maintainer, orphaned

Remove the name of Sergey Kostyliov as maintainer of befs.
In the MAINTAINERS file, befs is orphaned.

Signed-off-by: Marcos Souza
Signed-off-by: Randy Dunlap
Signed-off-by: Linus Torvalds

diff --git a/Documentation/filesystems/befs.txt b/Documentation/filesystems/befs.txt
index 6e49c36..da45e6c 100644
--- a/Documentation/filesystems/befs.txt
+++ b/Documentation/filesystems/befs.txt
@@ -27,7 +27,7 @@ His original code can still be found at:
 Does anyone know of a more current email address for Makoto? He doesn't
 respond to the address given above...

-Current maintainer: Sergey S. Kostyliov
+This filesystem doesn't have a maintainer.

 WHAT IS THIS DRIVER?
 ==================

-- cgit v0.10.2

From 17f2ae7f677f023997e02fd2ebabd90ea2a0390d Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Sun, 14 Aug 2011 13:34:31 +0200
Subject: PM / Domains: Fix build for CONFIG_PM_RUNTIME unset

Function genpd_queue_power_off_work() is not defined for CONFIG_PM_RUNTIME
unset, so pm_genpd_poweroff_unused() causes a build error to happen in that
case. Fix the problem by making pm_genpd_poweroff_unused() depend on
CONFIG_PM_RUNTIME too.

Signed-off-by: Rafael J. Wysocki

diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index e18566a..1c37457 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -460,6 +460,21 @@ static int pm_genpd_runtime_resume(struct device *dev)
 return 0;
 }

+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+}
+
 #else

 static inline void genpd_power_off_work_fn(struct work_struct *work) {}

@@ -1255,18 +1270,3 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
 list_add(&genpd->gpd_list_node, &gpd_list);
 mutex_unlock(&gpd_list_lock);
 }
-
-/**
- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
- */ -void pm_genpd_poweroff_unused(void) -{ - struct generic_pm_domain *genpd; - - mutex_lock(&gpd_list_lock); - - list_for_each_entry(genpd, &gpd_list, gpd_list_node) - genpd_queue_power_off_work(genpd); - - mutex_unlock(&gpd_list_lock); -} diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 21097cb..f9ec173 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -72,8 +72,6 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, extern void pm_genpd_init(struct generic_pm_domain *genpd, struct dev_power_governor *gov, bool is_off); extern int pm_genpd_poweron(struct generic_pm_domain *genpd); -extern void pm_genpd_poweroff_unused(void); -extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); #else static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) @@ -101,8 +99,14 @@ static inline int pm_genpd_poweron(struct generic_pm_domain *genpd) { return -ENOSYS; } -static inline void pm_genpd_poweroff_unused(void) {} +#endif + +#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME +extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); +extern void pm_genpd_poweroff_unused(void); +#else static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} +static inline void pm_genpd_poweroff_unused(void) {} #endif #endif /* _LINUX_PM_DOMAIN_H */ diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index b1914cb9..3744c59 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -231,3 +231,7 @@ config PM_CLK config PM_GENERIC_DOMAINS bool depends on PM + +config PM_GENERIC_DOMAINS_RUNTIME + def_bool y + depends on PM_RUNTIME && PM_GENERIC_DOMAINS -- cgit v0.10.2 From da6094ea7d3c2295473d8f5134279307255d6ebf Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Sun, 14 Aug 2011 11:31:16 +0200 Subject: ALSA: snd_usb_caiaq: track submitted output urbs The snd_usb_caiaq driver currently assumes that output urbs are serviced in time and doesn't track when and whether they are given back by the USB core. That usually works fine, but due to temporary limitations of the XHCI stack, we faced that urbs were submitted more than once with this approach. As it's no good practice to fire and forget urbs anyway, this patch introduces a proper bit mask to track which requests have been submitted and given back. That alone however doesn't make the driver work in case the host controller is broken and doesn't give back urbs at all, and the output stream will stop once all pre-allocated output urbs are consumed. But it does prevent crashes of the controller stack in such cases. See http://bugzilla.kernel.org/show_bug.cgi?id=40702 for more details. 
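A condensed sketch of the resulting claim/release pattern (the names match
the patch below; test_and_set_bit()/clear_bit() are the standard Linux
atomic bitops):

	/* claim: atomically find and mark a free output urb slot */
	for (i = 0; i < N_URBS; i++)
		if (test_and_set_bit(i, &dev->outurb_active_mask) == 0) {
			out = dev->data_urbs_out[i];
			break;
		}

	/* release: give the slot back once the urb completes */
	clear_bit(info->index, &dev->outurb_active_mask);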
Signed-off-by: Daniel Mack Reported-and-tested-by: Matej Laitl Cc: Sarah Sharp Cc: stable@kernel.org Signed-off-by: Takashi Iwai diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index aa52b3e..2cf87f5 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -139,8 +139,12 @@ static void stream_stop(struct snd_usb_caiaqdev *dev) for (i = 0; i < N_URBS; i++) { usb_kill_urb(dev->data_urbs_in[i]); - usb_kill_urb(dev->data_urbs_out[i]); + + if (test_bit(i, &dev->outurb_active_mask)) + usb_kill_urb(dev->data_urbs_out[i]); } + + dev->outurb_active_mask = 0; } static int snd_usb_caiaq_substream_open(struct snd_pcm_substream *substream) @@ -612,8 +616,8 @@ static void read_completed(struct urb *urb) { struct snd_usb_caiaq_cb_info *info = urb->context; struct snd_usb_caiaqdev *dev; - struct urb *out; - int frame, len, send_it = 0, outframe = 0; + struct urb *out = NULL; + int i, frame, len, send_it = 0, outframe = 0; size_t offset = 0; if (urb->status || !info) @@ -624,7 +628,17 @@ static void read_completed(struct urb *urb) if (!dev->streaming) return; - out = dev->data_urbs_out[info->index]; + /* find an unused output urb that is unused */ + for (i = 0; i < N_URBS; i++) + if (test_and_set_bit(i, &dev->outurb_active_mask) == 0) { + out = dev->data_urbs_out[i]; + break; + } + + if (!out) { + log("Unable to find an output urb to use\n"); + goto requeue; + } /* read the recently received packet and send back one which has * the same layout */ @@ -655,8 +669,12 @@ static void read_completed(struct urb *urb) out->number_of_packets = outframe; out->transfer_flags = URB_ISO_ASAP; usb_submit_urb(out, GFP_ATOMIC); + } else { + struct snd_usb_caiaq_cb_info *oinfo = out->context; + clear_bit(oinfo->index, &dev->outurb_active_mask); } +requeue: /* re-submit inbound urb */ for (frame = 0; frame < FRAMES_PER_URB; frame++) { urb->iso_frame_desc[frame].offset = BYTES_PER_FRAME * frame; @@ -678,6 +696,8 @@ static void write_completed(struct urb *urb) dev->output_running = 1; wake_up(&dev->prepare_wait_queue); } + + clear_bit(info->index, &dev->outurb_active_mask); } static struct urb **alloc_urbs(struct snd_usb_caiaqdev *dev, int dir, int *ret) @@ -829,6 +849,9 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) if (!dev->data_cb_info) return -ENOMEM; + dev->outurb_active_mask = 0; + BUILD_BUG_ON(N_URBS > (sizeof(dev->outurb_active_mask) * 8)); + for (i = 0; i < N_URBS; i++) { dev->data_cb_info[i].dev = dev; dev->data_cb_info[i].index = i; diff --git a/sound/usb/caiaq/device.h b/sound/usb/caiaq/device.h index b2b3101..3f9c633 100644 --- a/sound/usb/caiaq/device.h +++ b/sound/usb/caiaq/device.h @@ -96,6 +96,7 @@ struct snd_usb_caiaqdev { int input_panic, output_panic, warned; char *audio_in_buf, *audio_out_buf; unsigned int samplerates, bpp; + unsigned long outurb_active_mask; struct snd_pcm_substream *sub_playback[MAX_STREAMS]; struct snd_pcm_substream *sub_capture[MAX_STREAMS]; -- cgit v0.10.2 From f982f91516fa4cfd9d20518833cd04ad714585be Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Tue, 21 Jun 2011 22:09:50 +0200 Subject: mm: fix wrong vmap address calculations with odd NR_CPUS values Commit db64fe02258f ("mm: rewrite vmap layer") introduced code that does address calculations under the assumption that VMAP_BLOCK_SIZE is a power of two. However, this might not be true if CONFIG_NR_CPUS is not set to a power of two. Wrong vmap_block index/offset values could lead to memory corruption. 
However, this has never been observed in practice (or never been
diagnosed correctly); what caught this was the BUG_ON in vb_alloc()
that checks for inconsistent vmap_block indices.

To fix this, ensure that VMAP_BLOCK_SIZE always is a power of two.

BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=31572
Reported-by: Pavel Kysilka
Reported-by: Matias A. Fonzo
Signed-off-by: Clemens Ladisch
Signed-off-by: Stefan Richter
Cc: Nick Piggin
Cc: Jeremy Fitzhardinge
Cc: Krzysztof Helt
Cc: Andrew Morton
Cc: stable@kernel.org [2.6.28+]
Signed-off-by: Linus Torvalds

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 464621d..7ef0903 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -725,9 +725,10 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
 #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
 #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
-#define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
- VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
- VMALLOC_PAGES / NR_CPUS / 16))
+#define VMAP_BBMAP_BITS \
+ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
+ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
+ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

 #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)

-- cgit v0.10.2

From 93ee7a9340d64f20295aacc3fb6a22b759323280 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sun, 14 Aug 2011 15:09:08 -0700
Subject: Linux 3.1-rc2

diff --git a/Makefile b/Makefile
index b4ca4e1..3241d41 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Sneaky Weasel
+EXTRAVERSION = -rc2
+NAME = Wet Seal

 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"

-- cgit v0.10.2

From c3c53a073247ee7522ca80393319540db9f4dc1e Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Mon, 15 Aug 2011 10:15:10 +0930
Subject: virtio: Add text copy of spec to Documentation/virtual.

As suggested by Christoph Hellwig.

Signed-off-by: Rusty Russell

diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX
index fe0251c..8e60199 100644
--- a/Documentation/virtual/00-INDEX
+++ b/Documentation/virtual/00-INDEX
@@ -8,3 +8,6 @@ lguest/
 - Extremely simple hypervisor for experimental/educational use.
 uml/
 - User Mode Linux, builds/runs Linux kernel as a userspace program.
+virtio.txt
+ - Text version of draft virtio spec.
+ See http://ozlabs.org/~rusty/virtio-spec
diff --git a/Documentation/virtual/virtio-spec.txt b/Documentation/virtual/virtio-spec.txt
new file mode 100644
index 0000000..a350ae1
--- /dev/null
+++ b/Documentation/virtual/virtio-spec.txt
@@ -0,0 +1,2200 @@
+[Generated file: see http://ozlabs.org/~rusty/virtio-spec/]
+Virtio PCI Card Specification
+v0.9.1 DRAFT
+-
+
+Rusty Russell IBM Corporation (Editor)
+
+2011 August 1.
+
+Purpose and Description
+
+This document describes the specifications of the “virtio” family
+of PCI[LaTeX Command: nomenclature] devices. These devices are
+found in virtual environments[LaTeX Command: nomenclature],
+yet by design they are not all that different from physical PCI
+devices, and this document treats them as such. This allows the
+guest to use standard PCI drivers and discovery mechanisms.
+
+The purpose of virtio and this specification is that virtual
+environments and guests should have a straightforward, efficient,
+standard and extensible mechanism for virtual devices, rather
+than boutique per-environment or per-OS mechanisms.
+ + Straightforward: Virtio PCI devices use normal PCI mechanisms + of interrupts and DMA which should be familiar to any device + driver author. There is no exotic page-flipping or COW + mechanism: it's just a PCI device.[footnote: +This lack of page-sharing implies that the implementation of the +device (e.g. the hypervisor or host) needs full access to the +guest memory. Communication with untrusted parties (i.e. +inter-guest communication) requires copying. +] + + Efficient: Virtio PCI devices consist of rings of descriptors + for input and output, which are neatly separated to avoid cache + effects from both guest and device writing to the same cache + lines. + + Standard: Virtio PCI makes no assumptions about the environment + in which it operates, beyond supporting PCI. In fact the virtio + devices specified in the appendices do not require PCI at all: + they have been implemented on non-PCI buses.[footnote: +The Linux implementation further separates the PCI virtio code +from the specific virtio drivers: these drivers are shared with +the non-PCI implementations (currently lguest and S/390). +] + + Extensible: Virtio PCI devices contain feature bits which are + acknowledged by the guest operating system during device setup. + This allows forwards and backwards compatibility: the device + offers all the features it knows about, and the driver + acknowledges those it understands and wishes to use. + + Virtqueues + +The mechanism for bulk data transport on virtio PCI devices is +pretentiously called a virtqueue. Each device can have zero or +more virtqueues: for example, the network device has one for +transmit and one for receive. + +Each virtqueue occupies two or more physically-contiguous pages +(defined, for the purposes of this specification, as 4096 bytes), +and consists of three parts: + + ++-------------------+-----------------------------------+-----------+ +| Descriptor Table | Available Ring (padding) | Used Ring | ++-------------------+-----------------------------------+-----------+ + + +When the driver wants to send buffers to the device, it puts them +in one or more slots in the descriptor table, and writes the +descriptor indices into the available ring. It then notifies the +device. When the device has finished with the buffers, it writes +the descriptors into the used ring, and sends an interrupt. + +Specification + + PCI Discovery + +Any PCI device with Vendor ID 0x1AF4, and Device ID 0x1000 +through 0x103F inclusive is a virtio device[footnote: +The actual value within this range is ignored +]. The device must also have a Revision ID of 0 to match this +specification. + +The Subsystem Device ID indicates which virtio device is +supported by the device. The Subsystem Vendor ID should reflect +the PCI Vendor ID of the environment (it's currently only used +for informational purposes by the guest). 
+
+
++----------------------+--------------------+---------------+
+| Subsystem Device ID | Virtio Device | Specification |
++----------------------+--------------------+---------------+
++----------------------+--------------------+---------------+
+| 1 | network card | Appendix C |
++----------------------+--------------------+---------------+
+| 2 | block device | Appendix D |
++----------------------+--------------------+---------------+
+| 3 | console | Appendix E |
++----------------------+--------------------+---------------+
+| 4 | entropy source | Appendix F |
++----------------------+--------------------+---------------+
+| 5 | memory ballooning | Appendix G |
++----------------------+--------------------+---------------+
+| 6 | ioMemory | - |
++----------------------+--------------------+---------------+
+| 9 | 9P transport | - |
++----------------------+--------------------+---------------+
+
+
+ Device Configuration
+
+To configure the device, we use the first I/O region of the PCI
+device. This contains a virtio header followed by a
+device-specific region.
+
+There may be different widths of accesses to the I/O region; the “
+natural” access method for each field in the virtio header must
+be used (i.e. 32-bit accesses for 32-bit fields, etc), but the
+device-specific region can be accessed using any width accesses,
+and should obtain the same results.
+
+Note that this is possible because while the virtio header is PCI
+(i.e. little) endian, the device-specific region is encoded in
+the native endian of the guest (where such distinction is
+applicable).
+
+ Device Initialization Sequence
+
+We start with an overview of device initialization, then expand
+on the details of the device and how each step is performed.
+
+ Reset the device. This is not required on initial start up.
+
+ The ACKNOWLEDGE status bit is set: we have noticed the device.
+
+ The DRIVER status bit is set: we know how to drive the device.
+
+ Device-specific setup, including reading the Device Feature
+ Bits, discovery of virtqueues for the device, optional MSI-X
+ setup, and reading and possibly writing the virtio
+ configuration space.
+
+ The subset of Device Feature Bits understood by the driver is
+ written to the device.
+
+ The DRIVER_OK status bit is set.
+
+ The device can now be used (ie. buffers added to the
+ virtqueues)[footnote:
+Historically, drivers have used the device before steps 5 and 6.
+This is only allowed if the driver does not use any features
+which would alter this early use of the device.
+]
+
+If any of these steps go irrecoverably wrong, the guest should
+set the FAILED status bit to indicate that it has given up on the
+device (it can reset the device later to restart if desired).
+
+We now cover the fields required for general setup in detail.
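+
+As an illustration only (not part of the specification), the
+initialization sequence above might look like the following sketch in a
+driver. The example_setup() helper and the iowrite8() accessor usage are
+assumptions here; the status bit values match the Device Status section
+below, and the 18-byte offset of the Device Status field follows from
+the header layout in the next section.
+
+#define VIRTIO_STATUS_ACKNOWLEDGE 1
+#define VIRTIO_STATUS_DRIVER 2
+#define VIRTIO_STATUS_DRIVER_OK 4
+#define VIRTIO_STATUS_FAILED 0x80
+#define VIRTIO_HDR_STATUS 18 /* Device Status offset in the virtio header */
+
+static int example_virtio_init(void __iomem *iobase)
+{
+	u8 status = 0;
+
+	iowrite8(0, iobase + VIRTIO_HDR_STATUS); /* reset the device */
+
+	status |= VIRTIO_STATUS_ACKNOWLEDGE; /* we noticed the device */
+	iowrite8(status, iobase + VIRTIO_HDR_STATUS);
+
+	status |= VIRTIO_STATUS_DRIVER; /* we know how to drive it */
+	iowrite8(status, iobase + VIRTIO_HDR_STATUS);
+
+	/* feature negotiation and virtqueue discovery would go here */
+	if (example_setup(iobase) < 0) {
+		iowrite8(status | VIRTIO_STATUS_FAILED,
+			 iobase + VIRTIO_HDR_STATUS); /* give up */
+		return -1;
+	}
+
+	status |= VIRTIO_STATUS_DRIVER_OK; /* device is ready to use */
+	iowrite8(status, iobase + VIRTIO_HDR_STATUS);
+	return 0;
+}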
+
+ Virtio Header
+
+The virtio header looks as follows:
+
+
++------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
+| Bits || 32 | 32 | 32 | 16 | 16 | 16 | 8 | 8 |
++------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
+| Read/Write || R | R+W | R+W | R | R+W | R+W | R+W | R |
++------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
+| Purpose || Device | Guest | Queue | Queue | Queue | Queue | Device | ISR |
+| || Features bits 0:31 | Features bits 0:31 | Address | Size | Select | Notify | Status | Status |
++------------++---------------------+---------------------+----------+--------+---------+---------+---------+--------+
+
+
+If MSI-X is enabled for the device, two additional fields
+immediately follow this header:
+
+
++------------++----------------+--------+
+| Bits || 16 | 16 |
+ +----------------+--------+
++------------++----------------+--------+
+| Read/Write || R+W | R+W |
++------------++----------------+--------+
+| Purpose || Configuration | Queue |
+| (MSI-X) || Vector | Vector |
++------------++----------------+--------+
+
+
+Finally, if the high feature bits (VIRTIO_F_FEATURES_HI) are
+enabled, this is immediately followed by two additional fields:
+
+
++------------++----------------------+----------------------
+| Bits || 32 | 32
++------------++----------------------+----------------------
+| Read/Write || R | R+W
++------------++----------------------+----------------------
+| Purpose || Device | Guest
+| || Features bits 32:63 | Features bits 32:63
++------------++----------------------+----------------------
+
+
+Immediately following these general headers, there may be
+device-specific headers:
+
+
++------------++--------------------+
+| Bits || Device Specific |
+ +--------------------+
++------------++--------------------+
+| Read/Write || Device Specific |
++------------++--------------------+
+| Purpose || Device Specific... |
+| || |
++------------++--------------------+
+
+
+ Device Status
+
+The Device Status field is updated by the guest to indicate its
+progress. This provides a simple low-level diagnostic: it's most
+useful to imagine them hooked up to traffic lights on the console
+indicating the status of each device.
+
+The device can be reset by writing a 0 to this field, otherwise
+at least one bit should be set:
+
+ ACKNOWLEDGE (1) Indicates that the guest OS has found the
+ device and recognized it as a valid virtio device.
+
+ DRIVER (2) Indicates that the guest OS knows how to drive the
+ device. Under Linux, drivers can be loadable modules so there
+ may be a significant (or infinite) delay before setting this
+ bit.
+
+ DRIVER_OK (4) Indicates that the driver is set up and ready to
+ drive the device.
+
+ FAILED (128) Indicates that something went wrong in the guest,
+ and it has given up on the device. This could be an internal
+ error, or the driver didn't like the device for some reason, or
+ even a fatal error during device operation. The device must be
+ reset before attempting to re-initialize.
+
+ Feature Bits
+
+The least significant 31 bits of the first configuration field
+indicate the features that the device supports (the high bit is
+reserved, and will be used to indicate the presence of future
+feature bits elsewhere). If more than 31 feature bits are
+supported, the device indicates so by setting feature bit 31 (see
+[cha:Reserved-Feature-Bits]).
+The bits are allocated as follows:
+
+ 0 to 23 Feature bits for the specific device type
+
+ 24 to 40 Feature bits reserved for extensions to the queue and
+ feature negotiation mechanisms
+
+ 41 to 63 Feature bits reserved for future extensions
+
+For example, feature bit 0 for a network device (i.e. Subsystem
+Device ID 1) indicates that the device supports checksumming of
+packets.
+
+The feature bits are negotiated: the device lists all the
+features it understands in the Device Features field, and the
+guest writes the subset that it understands into the Guest
+Features field. The only way to renegotiate is to reset the
+device.
+
+In particular, new fields in the device configuration header are
+indicated by offering a feature bit, so the guest can check
+before accessing that part of the configuration space.
+
+This allows for forwards and backwards compatibility: if the
+device is enhanced with a new feature bit, older guests will not
+write that feature bit back to the Guest Features field and it
+can go into backwards compatibility mode. Similarly, if a guest
+is enhanced with a feature that the device doesn't support, it
+will not see that feature bit in the Device Features field and
+can go into backwards compatibility mode (or, for poor
+implementations, set the FAILED Device Status bit).
+
+Access to feature bits 32 to 63 is enabled by Guest by setting
+feature bit 31. If this bit is unset, Device must assume that all
+feature bits > 31 are unset.
+
+ Configuration/Queue Vectors
+
+When MSI-X capability is present and enabled in the device
+(through standard PCI configuration space) 4 bytes at byte offset
+20 are used to map configuration change and queue interrupts to
+MSI-X vectors. In this case, the ISR Status field is unused, and
+device specific configuration starts at byte offset 24 in virtio
+header structure. When MSI-X capability is not enabled, device
+specific configuration starts at byte offset 20 in virtio header.
+
+Writing a valid MSI-X Table entry number, 0 to 0x7FF, to one of
+Configuration/Queue Vector registers, maps interrupts triggered
+by the configuration change/selected queue events respectively to
+the corresponding MSI-X vector. To disable interrupts for a
+specific event type, unmap it by writing a special NO_VECTOR
+value:
+
+/* Vector value used to disable MSI for queue */
+
+#define VIRTIO_MSI_NO_VECTOR 0xffff
+
+Reading these registers returns vector mapped to a given event,
+or NO_VECTOR if unmapped. All queue and configuration change
+events are unmapped by default.
+
+Note that mapping an event to vector might require allocating
+internal device resources, and might fail. Devices report such
+failures by returning the NO_VECTOR value when the relevant
+Vector field is read. After mapping an event to vector, the
+driver must verify success by reading the Vector field value: on
+success, the previously written value is returned, and on
+failure, NO_VECTOR is returned. If a mapping failure is detected,
+the driver can retry mapping with fewer vectors, or disable MSI-X.
+
+ Virtqueue Configuration
+
+As a device can have zero or more virtqueues for bulk data
+transport (for example, the network driver has two), the driver
+needs to configure them as part of the device-specific
+configuration.
+
+This is done as follows, for each virtqueue a device has:
+
+ Write the virtqueue index (first queue is 0) to the Queue
+ Select field.
+
+ Read the virtqueue size from the Queue Size field, which is
+ always a power of 2.
+ This controls how big the virtqueue is
+ (see below). If this field is 0, the virtqueue does not exist.
+
+ Allocate and zero virtqueue in contiguous physical memory, on a
+ 4096 byte alignment. Write the physical address, divided by
+ 4096 to the Queue Address field.[footnote:
+The 4096 is based on the x86 page size, but it's also large
+enough to ensure that the separate parts of the virtqueue are on
+separate cache lines.
+]
+
+ Optionally, if MSI-X capability is present and enabled on the
+ device, select a vector to use to request interrupts triggered
+ by virtqueue events. Write the MSI-X Table entry number
+ corresponding to this vector in Queue Vector field. Read the
+ Queue Vector field: on success, previously written value is
+ returned; on failure, NO_VECTOR value is returned.
+
+The Queue Size field controls the total number of bytes required
+for the virtqueue according to the following formula:
+
+#define ALIGN(x) (((x) + 4095) & ~4095)
+
+static inline unsigned vring_size(unsigned int qsz)
+
+{
+
+ return ALIGN(sizeof(struct vring_desc)*qsz + sizeof(u16)*(2
++ qsz))
+
+ + ALIGN(sizeof(struct vring_used_elem)*qsz);
+
+}
+
+This currently wastes some space with padding, but also allows
+future extensions. The virtqueue layout structure looks like this
+(qsz is the Queue Size field, which is a variable, so this code
+won't compile):
+
+struct vring {
+
+ /* The actual descriptors (16 bytes each) */
+
+ struct vring_desc desc[qsz];
+
+
+
+ /* A ring of available descriptor heads with free-running
+index. */
+
+ struct vring_avail avail;
+
+
+
+ // Padding to the next 4096 boundary.
+
+ char pad[];
+
+
+
+ // A ring of used descriptor heads with free-running index.
+
+ struct vring_used used;
+
+};
+
+ A Note on Virtqueue Endianness
+
+Note that the endian of these fields and everything else in the
+virtqueue is the native endian of the guest, not little-endian as
+PCI normally is. This makes for simpler guest code, and it is
+assumed that the host already has to be deeply aware of the guest
+endian so such an “endian-aware” device is not a significant
+issue.
+
+ Descriptor Table
+
+The descriptor table refers to the buffers the guest is using for
+the device. The addresses are physical addresses, and the buffers
+can be chained via the next field. Each descriptor describes a
+buffer which is read-only or write-only, but a chain of
+descriptors can contain both read-only and write-only buffers.
+
+No descriptor chain may be more than 2^32 bytes long in total.
+
+struct vring_desc {
+
+ /* Address (guest-physical). */
+
+ u64 addr;
+
+ /* Length. */
+
+ u32 len;
+
+/* This marks a buffer as continuing via the next field. */
+
+#define VRING_DESC_F_NEXT 1
+
+/* This marks a buffer as write-only (otherwise read-only). */
+
+#define VRING_DESC_F_WRITE 2
+
+/* This means the buffer contains a list of buffer descriptors.
+*/
+
+#define VRING_DESC_F_INDIRECT 4
+
+ /* The flags as indicated above. */
+
+ u16 flags;
+
+ /* Next field if flags & NEXT */
+
+ u16 next;
+
+};
+
+The number of descriptors in the table is specified by the Queue
+Size field for this virtqueue.
+
+ Indirect Descriptors
+
+Some devices benefit by concurrently dispatching a large number
+of large requests. The VIRTIO_RING_F_INDIRECT_DESC feature can be
+used to allow this (see [cha:Reserved-Feature-Bits]).
To increase +ring capacity it is possible to store a table of indirect +descriptors anywhere in memory, and insert a descriptor in main +virtqueue (with flags&INDIRECT on) that refers to memory buffer +containing this indirect descriptor table; fields addr and len +refer to the indirect table address and length in bytes, +respectively. The indirect table layout structure looks like this +(len is the length of the descriptor that refers to this table, +which is a variable, so this code won't compile): + +struct indirect_descriptor_table { + + /* The actual descriptors (16 bytes each) */ + + struct vring_desc desc[len / 16]; + +}; + +The first indirect descriptor is located at start of the indirect +descriptor table (index 0), additional indirect descriptors are +chained by next field. An indirect descriptor without next field +(with flags&NEXT off) signals the end of the indirect descriptor +table, and transfers control back to the main virtqueue. An +indirect descriptor can not refer to another indirect descriptor +table (flags&INDIRECT must be off). A single indirect descriptor +table can include both read-only and write-only descriptors; +write-only flag (flags&WRITE) in the descriptor that refers to it +is ignored. + + Available Ring + +The available ring refers to what descriptors we are offering the +device: it refers to the head of a descriptor chain. The “flags” +field is currently 0 or 1: 1 indicating that we do not need an +interrupt when the device consumes a descriptor from the +available ring. Alternatively, the guest can ask the device to +delay interrupts until an entry with an index specified by the “ +used_event” field is written in the used ring (equivalently, +until the idx field in the used ring will reach the value +used_event + 1). The method employed by the device is controlled +by the VIRTIO_RING_F_EVENT_IDX feature bit (see [cha:Reserved-Feature-Bits] +). This interrupt suppression is merely an optimization; it may +not suppress interrupts entirely. + +The “idx” field indicates where we would put the next descriptor +entry (modulo the ring size). This starts at 0, and increases. + +struct vring_avail { + +#define VRING_AVAIL_F_NO_INTERRUPT 1 + + u16 flags; + + u16 idx; + + u16 ring[qsz]; /* qsz is the Queue Size field read from device +*/ + + u16 used_event; + +}; + + Used Ring + +The used ring is where the device returns buffers once it is done +with them. The flags field can be used by the device to hint that +no notification is necessary when the guest adds to the available +ring. Alternatively, the “avail_event” field can be used by the +device to hint that no notification is necessary until an entry +with an index specified by the “avail_event” is written in the +available ring (equivalently, until the idx field in the +available ring will reach the value avail_event + 1). The method +employed by the device is controlled by the guest through the +VIRTIO_RING_F_EVENT_IDX feature bit (see [cha:Reserved-Feature-Bits] +). [footnote: +These fields are kept here because this is the only part of the +virtqueue written by the device +]. + +Each entry in the ring is a pair: the head entry of the +descriptor chain describing the buffer (this matches an entry +placed in the available ring by the guest earlier), and the total +of bytes written into the buffer. The latter is extremely useful +for guests using untrusted buffers: if you do not know exactly +how much has been written by the device, you usually have to zero +the buffer to ensure no data leakage occurs. 
+ +/* u32 is used here for ids for padding reasons. */ + +struct vring_used_elem { + + /* Index of start of used descriptor chain. */ + + u32 id; + + /* Total length of the descriptor chain which was used +(written to) */ + + u32 len; + +}; + + + +struct vring_used { + +#define VRING_USED_F_NO_NOTIFY 1 + + u16 flags; + + u16 idx; + + struct vring_used_elem ring[qsz]; + + u16 avail_event; + +}; + + Helpers for Managing Virtqueues + +The Linux Kernel Source code contains the definitions above and +helper routines in a more usable form, in +include/linux/virtio_ring.h. This was explicitly licensed by IBM +and Red Hat under the (3-clause) BSD license so that it can be +freely used by all other projects, and is reproduced (with slight +variation to remove Linux assumptions) in Appendix A. + + Device Operation + +There are two parts to device operation: supplying new buffers to +the device, and processing used buffers from the device. As an +example, the virtio network device has two virtqueues: the +transmit virtqueue and the receive virtqueue. The driver adds +outgoing (read-only) packets to the transmit virtqueue, and then +frees them after they are used. Similarly, incoming (write-only) +buffers are added to the receive virtqueue, and processed after +they are used. + + Supplying Buffers to The Device + +Actual transfer of buffers from the guest OS to the device +operates as follows: + + Place the buffer(s) into free descriptor(s). + + If there are no free descriptors, the guest may choose to + notify the device even if notifications are suppressed (to + reduce latency).[footnote: +The Linux drivers do this only for read-only buffers: for +write-only buffers, it is assumed that the driver is merely +trying to keep the receive buffer ring full, and no notification +of this expected condition is necessary. +] + + Place the id of the buffer in the next ring entry of the + available ring. + + The steps (1) and (2) may be performed repeatedly if batching + is possible. + + A memory barrier should be executed to ensure the device sees + the updated descriptor table and available ring before the next + step. + + The available “idx” field should be increased by the number of + entries added to the available ring. + + A memory barrier should be executed to ensure that we update + the idx field before checking for notification suppression. + + If notifications are not suppressed, the device should be + notified of the new buffers. + +Note that the above code does not take precautions against the +available ring buffer wrapping around: this is not possible since +the ring buffer is the same size as the descriptor table, so step +(1) will prevent such a condition. + +In addition, the maximum queue size is 32768 (it must be a power +of 2 which fits in 16 bits), so the 16-bit “idx” value can always +distinguish between a full and empty buffer. + +Here is a description of each stage in more detail. + + Placing Buffers Into The Descriptor Table + +A buffer consists of zero or more read-only physically-contiguous +elements followed by zero or more physically-contiguous +write-only elements (it must have at least one element). This +algorithm maps it into the descriptor table: + + for each buffer element, b: + + Get the next free descriptor table entry, d + + Set d.addr to the physical address of the start of b + + Set d.len to the length of b. + + If b is write-only, set d.flags to VRING_DESC_F_WRITE, + otherwise 0. 
+
+ If there is a buffer element after this:
+
+ Set d.next to the index of the next free descriptor element.
+
+ Set the VRING_DESC_F_NEXT bit in d.flags.
+
+In practice, the d.next fields are usually used to chain free
+descriptors, and a separate count kept to check there are enough
+free descriptors before beginning the mappings.
+
+ Updating The Available Ring
+
+The head of the buffer we mapped is the first d in the algorithm
+above. A naive implementation would do the following:
+
+avail->ring[avail->idx % qsz] = head;
+
+However, in general we can add many descriptors before we update
+the “idx” field (at which point they become visible to the
+device), so we keep a counter of how many we've added:
+
+avail->ring[(avail->idx + added++) % qsz] = head;
+
+ Updating The Index Field
+
+Once the idx field of the virtqueue is updated, the device will
+be able to access the descriptor entries we've created and the
+memory they refer to. This is why a memory barrier is generally
+used before the idx update, to ensure it sees the most up-to-date
+copy.
+
+The idx field always increments, and we let it wrap naturally at
+65536:
+
+avail->idx += added;
+
+ Notifying The Device
+
+Device notification occurs by writing the 16-bit virtqueue index
+of this virtqueue to the Queue Notify field of the virtio header
+in the first I/O region of the PCI device. This can be expensive,
+however, so the device can suppress such notifications if it
+doesn't need them. We have to be careful to expose the new idx
+value before checking the suppression flag: it's OK to notify
+gratuitously, but not to omit a required notification. So again,
+we use a memory barrier here before reading the flags or the
+avail_event field.
+
+If the VIRTIO_F_RING_EVENT_IDX feature is not negotiated, and if
+the VRING_USED_F_NO_NOTIFY flag is not set, we go ahead and write
+to the PCI configuration space.
+
+If the VIRTIO_F_RING_EVENT_IDX feature is negotiated, we read the
+avail_event field in the available ring structure. If the
+available index crossed the avail_event field value since the
+last notification, we go ahead and write to the PCI configuration
+space. The avail_event field wraps naturally at 65536 as well:
+
+(u16)(new_idx - avail_event - 1) < (u16)(new_idx - old_idx)
+
+ Receiving Used Buffers From The
+ Device
+
+Once the device has used a buffer (read from or written to it, or
+parts of both, depending on the nature of the virtqueue and the
+device), it sends an interrupt, following an algorithm very
+similar to the algorithm used for the driver to send the device a
+buffer:
+
+ Write the head descriptor number to the next field in the used
+ ring.
+
+ Update the used ring idx.
+
+ Determine whether an interrupt is necessary:
+
+ If the VIRTIO_F_RING_EVENT_IDX feature is not negotiated: check
+ if the VRING_AVAIL_F_NO_INTERRUPT flag is not set in
+ avail->flags
+
+ If the VIRTIO_F_RING_EVENT_IDX feature is negotiated: check
+ whether the used index crossed the used_event field value
+ since the last update. The used_event field wraps naturally
+ at 65536 as well: (u16)(new_idx - used_event - 1) < (u16)(new_idx - old_idx)
+
+ If an interrupt is necessary:
+
+ If MSI-X capability is disabled:
+
+ Set the lower bit of the ISR Status field for the device.
+
+ Send the appropriate PCI interrupt for the device.
+
+ If MSI-X capability is enabled:
+
+ Request the appropriate MSI-X interrupt message for the
+ device, Queue Vector field sets the MSI-X Table entry
+ number.
+
+ If Queue Vector field value is NO_VECTOR, no interrupt
+ message is requested for this event.
+
+The guest interrupt handler should:
+
+ If MSI-X capability is disabled: read the ISR Status field,
+ which will reset it to zero. If the lower bit is zero, the
+ interrupt was not for this device. Otherwise, the guest driver
+ should look through the used rings of each virtqueue for the
+ device, to see if any progress has been made by the device
+ which requires servicing.
+
+ If MSI-X capability is enabled: look through the used rings of
+ each virtqueue mapped to the specific MSI-X vector for the
+ device, to see if any progress has been made by the device
+ which requires servicing.
+
+For each ring, guest should then disable interrupts by writing
+VRING_AVAIL_F_NO_INTERRUPT flag in avail structure, if required.
+It can then process used ring entries, finally enabling interrupts
+by clearing the VRING_AVAIL_F_NO_INTERRUPT flag or updating the
+EVENT_IDX field in the available structure. The guest should then
+execute a memory barrier, and then recheck the ring empty
+condition. This is necessary to handle the case where, after the
+last check and before enabling interrupts, an interrupt has been
+suppressed by the device:
+
+vring_disable_interrupts(vq);
+
+for (;;) {
+
+ if (vq->last_seen_used == vring->used.idx) {
+
+ vring_enable_interrupts(vq);
+
+ mb();
+
+ if (vq->last_seen_used == vring->used.idx)
+
+ break;
+
+ }
+
+ struct vring_used_elem *e =
+vring.used->ring[vq->last_seen_used%vsz];
+
+ process_buffer(e);
+
+ vq->last_seen_used++;
+
+}
+
+ Dealing With Configuration Changes
+
+Some virtio PCI devices can change the device configuration
+state, as reflected in the virtio header in the PCI configuration
+space. In this case:
+
+ If MSI-X capability is disabled: an interrupt is delivered and
+ the second highest bit is set in the ISR Status field to
+ indicate that the driver should re-examine the configuration
+ space. Note that a single interrupt can indicate both that one
+ or more virtqueue has been used and that the configuration
+ space has changed: even if the config bit is set, virtqueues
+ must be scanned.
+
+ If MSI-X capability is enabled: an interrupt message is
+ requested. The Configuration Vector field sets the MSI-X Table
+ entry number to use. If Configuration Vector field value is
+ NO_VECTOR, no interrupt message is requested for this event.
+
+Creating New Device Types
+
+Various considerations are necessary when creating a new device
+type:
+
+ How Many Virtqueues?
+
+It is possible that a very simple device will operate entirely
+through its configuration space, but most will need at least one
+virtqueue in which it will place requests. A device with both
+input and output (eg. console and network devices described here)
+needs two queues: one which the driver fills with buffers to
+receive input, and one which the driver places buffers to
+transmit output.
+
+ What Configuration Space Layout?
+
+Configuration space is generally used for rarely-changing or
+initialization-time parameters. But it is a limited resource, so
+it might be better to use a virtqueue to update configuration
+information (the network device does this for filtering,
+otherwise the table in the config space could potentially be very
+large).
+
+Note that this space is generally the guest's native endian,
+rather than PCI's little-endian.
+
+ What Device Number?
+
+Currently device numbers are assigned quite freely: a simple
+request mail to the author of this document or the Linux
+virtualization mailing list[footnote:
+
+https://lists.linux-foundation.org/mailman/listinfo/virtualization
+] will be sufficient to secure a unique one.
+
+Meanwhile for experimental drivers, use 65535 and work backwards.
+
+ How many MSI-X vectors?
+
+Using the optional MSI-X capability, devices can speed up
+interrupt processing by removing the need to read ISR Status
+register by guest driver (which might be an expensive operation),
+reducing interrupt sharing between devices and queues within the
+device, and handling interrupts from multiple CPUs. However, some
+systems impose a limit (which might be as low as 256) on the
+total number of MSI-X vectors that can be allocated to all
+devices. Devices and/or device drivers should take this into
+account, limiting the number of vectors used unless the device is
+expected to cause a high volume of interrupts. Devices can
+control the number of vectors used by limiting the MSI-X Table
+Size or not presenting MSI-X capability in PCI configuration
+space. Drivers can control this by mapping events to as small a
+number of vectors as possible, or disabling MSI-X capability
+altogether.
+
+ Message Framing
+
+The descriptors used for a buffer should not affect the semantics
+of the message, except for the total length of the buffer. For
+example, a network buffer consists of a 10 byte header followed
+by the network packet. Whether this is presented in the ring
+descriptor chain as (say) a 10 byte buffer and a 1514 byte
+buffer, or a single 1524 byte buffer, or even three buffers,
+should have no effect.
+
+In particular, no implementation should use the descriptor
+boundaries to determine the size of any header in a request.[footnote:
+The current qemu device implementations mistakenly insist that
+the first descriptor cover the header in these cases exactly, so
+a cautious driver should arrange it so.
+]
+
+ Device Improvements
+
+Any change to configuration space, or new virtqueues, or
+behavioural changes, should be indicated by negotiation of a new
+feature bit. This establishes clarity[footnote:
+Even if it does mean documenting design or implementation
+mistakes!
+] and avoids future expansion problems.
+
+Clusters of functionality which are always implemented together
+can use a single bit, but if one feature makes sense without the
+others they should not be gratuitously grouped together to
+conserve feature bits. We can always extend the spec when the
+first person needs more than 24 feature bits for their device.
+
+[LaTeX Command: printnomenclature]
+
+Appendix A: virtio_ring.h
+
+#ifndef VIRTIO_RING_H
+
+#define VIRTIO_RING_H
+
+/* An interface for efficient virtio implementation.
+
+ *
+
+ * This header is BSD licensed so anyone can use the definitions
+
+ * to implement compatible drivers/servers.
+
+ *
+
+ * Copyright 2007, 2009, IBM Corporation
+
+ * Copyright 2011, Red Hat, Inc
+
+ * All rights reserved.
+
+ *
+
+ * Redistribution and use in source and binary forms, with or
+without
+
+ * modification, are permitted provided that the following
+conditions
+
+ * are met:
+
+ * 1. Redistributions of source code must retain the above
+copyright
+
+ * notice, this list of conditions and the following
+disclaimer.
+
+ * 2.
Redistributions in binary form must reproduce the above +copyright + + * notice, this list of conditions and the following +disclaimer in the + + * documentation and/or other materials provided with the +distribution. + + * 3. Neither the name of IBM nor the names of its contributors + + * may be used to endorse or promote products derived from +this software + + * without specific prior written permission. + + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS ``AS IS'' AND + + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE + + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE + + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE +LIABLE + + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL + + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS + + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) + + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT + + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY + + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF + + * SUCH DAMAGE. + + */ + + + +/* This marks a buffer as continuing via the next field. */ + +#define VRING_DESC_F_NEXT 1 + +/* This marks a buffer as write-only (otherwise read-only). */ + +#define VRING_DESC_F_WRITE 2 + + + +/* The Host uses this in used->flags to advise the Guest: don't +kick me + + * when you add a buffer. It's unreliable, so it's simply an + + * optimization. Guest will still kick if it's out of buffers. +*/ + +#define VRING_USED_F_NO_NOTIFY 1 + +/* The Guest uses this in avail->flags to advise the Host: don't + + * interrupt me when you consume a buffer. It's unreliable, so +it's + + * simply an optimization. */ + +#define VRING_AVAIL_F_NO_INTERRUPT 1 + + + +/* Virtio ring descriptors: 16 bytes. + + * These can chain together via "next". */ + +struct vring_desc { + + /* Address (guest-physical). */ + + uint64_t addr; + + /* Length. */ + + uint32_t len; + + /* The flags as indicated above. */ + + uint16_t flags; + + /* We chain unused descriptors via this, too */ + + uint16_t next; + +}; + + + +struct vring_avail { + + uint16_t flags; + + uint16_t idx; + + uint16_t ring[]; + + uint16_t used_event; + +}; + + + +/* u32 is used here for ids for padding reasons. */ + +struct vring_used_elem { + + /* Index of start of used descriptor chain. */ + + uint32_t id; + + /* Total length of the descriptor chain which was written +to. */ + + uint32_t len; + +}; + + + +struct vring_used { + + uint16_t flags; + + uint16_t idx; + + struct vring_used_elem ring[]; + + uint16_t avail_event; + +}; + + + +struct vring { + + unsigned int num; + + + + struct vring_desc *desc; + + struct vring_avail *avail; + + struct vring_used *used; + +}; + + + +/* The standard layout for the ring is a continuous chunk of +memory which + + * looks like this. We assume num is a power of 2. + + * + + * struct vring { + + * // The actual descriptors (16 bytes each) + + * struct vring_desc desc[num]; + + * + + * // A ring of available descriptor heads with free-running +index. + + * __u16 avail_flags; + + * __u16 avail_idx; + + * __u16 available[num]; + + * + + * // Padding to the next align boundary. + + * char pad[]; + + * + + * // A ring of used descriptor heads with free-running +index. 
+
+ * __u16 used_flags;
+
+ * __u16 used_idx;
+
+ * struct vring_used_elem used[num];
+
+ * };
+
+ * Note: for virtio PCI, align is 4096.
+
+ */
+
+static inline void vring_init(struct vring *vr, unsigned int num,
+void *p,
+
+ unsigned long align)
+
+{
+
+ vr->num = num;
+
+ vr->desc = p;
+
+ vr->avail = p + num*sizeof(struct vring_desc);
+
+ vr->used = (void *)(((unsigned long)&vr->avail->ring[num] +
+
+ align-1)
+
+ & ~(align - 1));
+
+}
+
+
+
+static inline unsigned vring_size(unsigned int num, unsigned long
+align)
+
+{
+
+ return ((sizeof(struct vring_desc)*num +
+sizeof(uint16_t)*(2+num)
+
+ + align - 1) & ~(align - 1))
+
+ + sizeof(uint16_t)*3 + sizeof(struct
+vring_used_elem)*num;
+
+}
+
+
+
+static inline int vring_need_event(uint16_t event_idx, uint16_t
+new_idx, uint16_t old_idx)
+
+{
+
+ return (uint16_t)(new_idx - event_idx - 1) <
+(uint16_t)(new_idx - old_idx);
+
+}
+
+#endif /* VIRTIO_RING_H */
+
+Appendix B: Reserved Feature Bits
+
+Currently there are five device-independent feature bits defined:
+
+ VIRTIO_F_NOTIFY_ON_EMPTY (24) Negotiating this feature
+ indicates that the driver wants an interrupt if the device runs
+ out of available descriptors on a virtqueue, even though
+ interrupts are suppressed using the VRING_AVAIL_F_NO_INTERRUPT
+ flag or the used_event field. An example of this is the
+ networking driver: it doesn't need to know every time a packet
+ is transmitted, but it does need to free the transmitted
+ packets a finite time after they are transmitted. It can avoid
+ using a timer if the device interrupts it when all the packets
+ are transmitted.
+
+ VIRTIO_F_RING_INDIRECT_DESC (28) Negotiating this feature
+ indicates that the driver can use descriptors with the
+ VRING_DESC_F_INDIRECT flag set, as described in [sub:Indirect-Descriptors]
+ .
+
+ VIRTIO_F_RING_EVENT_IDX(29) This feature enables the used_event
+ and the avail_event fields. If set, it indicates that the
+ device should ignore the flags field in the available ring
+ structure. Instead, the used_event field in this structure is
+ used by guest to suppress device interrupts. Further, the
+ driver should ignore the flags field in the used ring
+ structure. Instead, the avail_event field in this structure is
+ used by the device to suppress notifications. If unset, the
+ driver should ignore the used_event field; the device should
+ ignore the avail_event field; the flags field is used.
+
+ VIRTIO_F_BAD_FEATURE(30) This feature should never be
+ negotiated by the guest; doing so is an indication that the
+ guest is faulty[footnote:
+An experimental virtio PCI driver contained in Linux version
+2.6.25 had this problem, and this feature bit can be used to
+detect it.
+]
+
+ VIRTIO_F_FEATURES_HIGH(31) This feature indicates that the
+ device supports feature bits 32:63. If unset, feature bits
+ 32:63 are unset.
+
+Appendix C: Network Device
+
+The virtio network device is a virtual ethernet card, and is the
+most complex of the devices supported so far by virtio. It has
+been enhanced rapidly and demonstrates clearly how support for new
+features should be added to an existing device. Empty buffers are
+placed in one virtqueue for receiving packets, and outgoing
+packets are enqueued into another for transmission in that order.
+A third command queue is used to control advanced filtering
+features.
+
+ Configuration
+
+ Subsystem Device ID 1
+
+ Virtqueues 0:receiveq. 1:transmitq.
+ 2:controlq[footnote:
+Only if VIRTIO_NET_F_CTRL_VQ set
+]
+
+ Feature bits
+
+ VIRTIO_NET_F_CSUM (0) Device handles packets with partial
+ checksum
+
+ VIRTIO_NET_F_GUEST_CSUM (1) Guest handles packets with partial
+ checksum
+
+ VIRTIO_NET_F_MAC (5) Device has given MAC address.
+
+ VIRTIO_NET_F_GSO (6) (Deprecated) device handles packets with
+ any GSO type.[footnote:
+It was supposed to indicate segmentation offload support, but
+upon further investigation it became clear that multiple bits
+were required.
+]
+
+ VIRTIO_NET_F_GUEST_TSO4 (7) Guest can receive TSOv4.
+
+ VIRTIO_NET_F_GUEST_TSO6 (8) Guest can receive TSOv6.
+
+ VIRTIO_NET_F_GUEST_ECN (9) Guest can receive TSO with ECN.
+
+ VIRTIO_NET_F_GUEST_UFO (10) Guest can receive UFO.
+
+ VIRTIO_NET_F_HOST_TSO4 (11) Device can receive TSOv4.
+
+ VIRTIO_NET_F_HOST_TSO6 (12) Device can receive TSOv6.
+
+ VIRTIO_NET_F_HOST_ECN (13) Device can receive TSO with ECN.
+
+ VIRTIO_NET_F_HOST_UFO (14) Device can receive UFO.
+
+ VIRTIO_NET_F_MRG_RXBUF (15) Guest can merge receive buffers.
+
+ VIRTIO_NET_F_STATUS (16) Configuration status field is
+ available.
+
+ VIRTIO_NET_F_CTRL_VQ (17) Control channel is available.
+
+ VIRTIO_NET_F_CTRL_RX (18) Control channel RX mode support.
+
+ VIRTIO_NET_F_CTRL_VLAN (19) Control channel VLAN filtering.
+
+ Device configuration layout Two configuration fields are
+ currently defined. The mac address field always exists (though
+ is only valid if VIRTIO_NET_F_MAC is set), and the status field
+ only exists if VIRTIO_NET_F_STATUS is set. Only one bit is
+ currently defined for the status field: VIRTIO_NET_S_LINK_UP. #define VIRTIO_NET_S_LINK_UP 1
+
+
+
+struct virtio_net_config {
+
+ u8 mac[6];
+
+ u16 status;
+
+};
+
+ Device Initialization
+
+ The initialization routine should identify the receive and
+ transmission virtqueues.
+
+ If the VIRTIO_NET_F_MAC feature bit is set, the configuration
+ space “mac” entry indicates the “physical” address of the
+ network card, otherwise a private MAC address should be
+ assigned. All guests are expected to negotiate this feature if
+ it is set.
+
+ If the VIRTIO_NET_F_CTRL_VQ feature bit is negotiated, identify
+ the control virtqueue.
+
+ If the VIRTIO_NET_F_STATUS feature bit is negotiated, the link
+ status can be read from the bottom bit of the “status” config
+ field. Otherwise, the link should be assumed active.
+
+ The receive virtqueue should be filled with receive buffers.
+ This is described in detail below in “Setting Up Receive
+ Buffers”.
+
+ A driver can indicate that it will generate checksumless
+ packets by negotiating the VIRTIO_NET_F_CSUM feature. This “
+ checksum offload” is a common feature on modern network cards.
+
+ If that feature is negotiated, a driver can use TCP or UDP
+ segmentation offload by negotiating the VIRTIO_NET_F_HOST_TSO4
+ (IPv4 TCP), VIRTIO_NET_F_HOST_TSO6 (IPv6 TCP) and
+ VIRTIO_NET_F_HOST_UFO (UDP fragmentation) features. It should
+ not send TCP packets requiring segmentation offload which have
+ the Explicit Congestion Notification bit set, unless the
+ VIRTIO_NET_F_HOST_ECN feature is negotiated.[footnote:
+This is a common restriction in real, older network cards.
+]
+
+ The converse features are also available: a driver can save the
+ virtual device some work by negotiating these features.[footnote:
+For example, a network packet transported between two guests on
+the same system may not require checksumming at all, nor
+segmentation, if both guests are amenable.
+] The VIRTIO_NET_F_GUEST_CSUM feature indicates that partially
+ checksummed packets can be received, and if it can do that then
+ the VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+ VIRTIO_NET_F_GUEST_UFO and VIRTIO_NET_F_GUEST_ECN are the input
+ equivalents of the features described above. See “Receiving
+ Packets” below.
+
+ Device Operation
+
+Packets are transmitted by placing them in the transmitq, and
+buffers for incoming packets are placed in the receiveq. In each
+case, the packet itself is preceded by a header:
+
+struct virtio_net_hdr {
+
+#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1
+
+ u8 flags;
+
+#define VIRTIO_NET_HDR_GSO_NONE 0
+
+#define VIRTIO_NET_HDR_GSO_TCPV4 1
+
+#define VIRTIO_NET_HDR_GSO_UDP 3
+
+#define VIRTIO_NET_HDR_GSO_TCPV6 4
+
+#define VIRTIO_NET_HDR_GSO_ECN 0x80
+
+ u8 gso_type;
+
+ u16 hdr_len;
+
+ u16 gso_size;
+
+ u16 csum_start;
+
+ u16 csum_offset;
+
+/* Only if VIRTIO_NET_F_MRG_RXBUF: */
+
+ u16 num_buffers;
+
+};
+
+The controlq is used to control device features such as
+filtering.
+
+ Packet Transmission
+
+Transmitting a single packet is simple, but varies depending on
+the different features the driver negotiated.
+
+ If the driver negotiated VIRTIO_NET_F_CSUM, and the packet has
+ not been fully checksummed, then the virtio_net_hdr's fields
+ are set as follows. Otherwise, the packet must be fully
+ checksummed, and flags is zero.
+
+ flags has the VIRTIO_NET_HDR_F_NEEDS_CSUM set,
+
+ csum_start is set to the offset within
+ the packet to begin checksumming, and
+
+ csum_offset indicates how many bytes after the csum_start the
+ new (16 bit ones' complement) checksum should be placed.[footnote:
+For example, consider a partially checksummed TCP (IPv4) packet.
+It will have a 14 byte ethernet header and 20 byte IP header
+followed by the TCP header (with the TCP checksum field 16 bytes
+into that header). csum_start will be 14+20 = 34 (the TCP
+checksum includes the header), and csum_offset will be 16. The
+value in the TCP checksum field will be the sum of the TCP pseudo
+header, so that replacing it by the ones' complement checksum of
+the TCP header and body will give the correct result.
+]
+
+ If the driver negotiated
+ VIRTIO_NET_F_HOST_TSO4, TSO6 or UFO, and the packet requires
+ TCP segmentation or UDP fragmentation, then the “gso_type”
+ field is set to VIRTIO_NET_HDR_GSO_TCPV4, TCPV6 or UDP.
+ (Otherwise, it is set to VIRTIO_NET_HDR_GSO_NONE). In this
+ case, packets larger than 1514 bytes can be transmitted: the
+ metadata indicates how to replicate the packet header to cut it
+ into smaller packets. The other gso fields are set:
+
+ hdr_len is a hint to the device as to how much of the header
+ needs to be kept to copy into each packet, usually set to the
+ length of the headers, including the transport header.[footnote:
+Due to various bugs in implementations, this field is not useful
+as a guarantee of the transport header size.
+]
+
+ gso_size is the size of the packet beyond that header (ie.
+ MSS).
+
+ If the driver negotiated the VIRTIO_NET_F_HOST_ECN feature, the
+ VIRTIO_NET_HDR_GSO_ECN bit may be set in “gso_type” as well,
+ indicating that the TCP packet has the ECN bit set.[footnote:
+This case is not handled by some older hardware, so is called out
+specifically in the protocol.
+]
+
+ If the driver negotiated the VIRTIO_NET_F_MRG_RXBUF feature,
+ the num_buffers field is set to zero.
+
+ Packet Transmission Interrupt
+
+Often a driver will suppress transmission interrupts using the
+VRING_AVAIL_F_NO_INTERRUPT flag (see [sub:Receiving-Used-Buffers]
+) and check for used packets in the transmit path of following
+packets. However, it will still receive interrupts if the
+VIRTIO_F_NOTIFY_ON_EMPTY feature is negotiated, indicating that
+the transmission queue is completely emptied.
+
+The normal behavior in this interrupt handler is to retrieve the
+new descriptors from the used ring and free the corresponding
+headers and packets.
+
+ Setting Up Receive Buffers
+
+It is generally a good idea to keep the receive virtqueue as
+fully populated as possible: if it runs out, network performance
+will suffer.
+
+If the VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6 or
+VIRTIO_NET_F_GUEST_UFO features are used, the Guest will need to
+accept packets of up to 65550 bytes long (the maximum size of a
+TCP or UDP packet, plus the 14 byte ethernet header), otherwise
+1514 bytes. So unless VIRTIO_NET_F_MRG_RXBUF is negotiated, every
+buffer in the receive queue needs to be at least this length [footnote:
+Obviously each one can be split across multiple descriptor
+elements.
+].
+
+If VIRTIO_NET_F_MRG_RXBUF is negotiated, each buffer must be at
+least the size of the struct virtio_net_hdr.
+
+ Packet Receive Interrupt
+
+When a packet is copied into a buffer in the receiveq, the
+optimal path is to disable further interrupts for the receiveq
+(see [sub:Receiving-Used-Buffers]) and process packets until no
+more are found, then re-enable them.
+
+Processing a packet involves:
+
+ If the driver negotiated the VIRTIO_NET_F_MRG_RXBUF feature,
+ then the “num_buffers” field indicates how many descriptors
+ this packet is spread over (including this one). This allows
+ receipt of large packets without having to allocate large
+ buffers. In this case, there will be at least “num_buffers” in
+ the used ring, and they should be chained together to form a
+ single packet. The other buffers will not begin with a struct
+ virtio_net_hdr.
+
+ If the VIRTIO_NET_F_MRG_RXBUF feature was not negotiated, or
+ the “num_buffers” field is one, then the entire packet will be
+ contained within this buffer, immediately following the struct
+ virtio_net_hdr.
+
+ If the VIRTIO_NET_F_GUEST_CSUM feature was negotiated, the
+ VIRTIO_NET_HDR_F_NEEDS_CSUM bit in the “flags” field may be
+ set: if so, the checksum on the packet is incomplete and the “
+ csum_start” and “csum_offset” fields indicate how to calculate
+ it (see [ite:csum_start-is-set]).
+
+ If the VIRTIO_NET_F_GUEST_TSO4, TSO6 or UFO options were
+ negotiated, then the “gso_type” may be something other than
+ VIRTIO_NET_HDR_GSO_NONE, and the “gso_size” field indicates the
+ desired MSS (see [enu:If-the-driver]).
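
The receive-side steps above can be condensed into a short,
non-normative sketch; chain_next_used_buffer(), complete_checksum()
and note_rx_gso() are hypothetical driver routines, not part of the
specification:

/* Illustrative sketch only: not part of the specification. */
static void example_handle_rx(struct virtio_net_hdr *hdr, int mrg_rxbuf)
{
	int n = mrg_rxbuf ? hdr->num_buffers : 1;

	/* With MRG_RXBUF, the next n - 1 used buffers belong to this packet */
	while (--n > 0)
		chain_next_used_buffer();

	/* Finish the checksum if the device left it incomplete */
	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
		complete_checksum(hdr->csum_start, hdr->csum_offset);

	/* A large receive: remember the MSS the device reported */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE)
		note_rx_gso(hdr->gso_type, hdr->gso_size);
}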
+
+Control Virtqueue
+
+The driver uses the control virtqueue (if VIRTIO_NET_F_CTRL_VQ is
+negotiated) to send commands to manipulate various features of
+the device which would not easily map into the configuration
+space.
+
+All commands are of the following form:
+
+struct virtio_net_ctrl {
+
+ u8 class;
+
+ u8 command;
+
+ u8 command-specific-data[];
+
+ u8 ack;
+
+};
+
+/* ack values */
+
+#define VIRTIO_NET_OK 0
+
+#define VIRTIO_NET_ERR 1
+
+The class, command and command-specific-data are set by the
+driver, and the device sets the ack byte. There is little the
+driver can do except issue a diagnostic if the ack byte is not
+VIRTIO_NET_OK.
+
+ Packet Receive Filtering
+
+If the VIRTIO_NET_F_CTRL_RX feature is negotiated, the driver can
+send control commands for promiscuous mode, multicast receiving,
+and filtering of MAC addresses.
+
+Note that in general, these commands are best-effort: unwanted
+packets may still arrive.
+
+ Setting Promiscuous Mode
+
+#define VIRTIO_NET_CTRL_RX 0
+
+ #define VIRTIO_NET_CTRL_RX_PROMISC 0
+
+ #define VIRTIO_NET_CTRL_RX_ALLMULTI 1
+
+The class VIRTIO_NET_CTRL_RX has two commands:
+VIRTIO_NET_CTRL_RX_PROMISC turns promiscuous mode on and off, and
+VIRTIO_NET_CTRL_RX_ALLMULTI turns all-multicast receive on and
+off. The command-specific-data is one byte containing 0 (off) or
+1 (on).
+
+ Setting MAC Address Filtering
+
+struct virtio_net_ctrl_mac {
+
+ u32 entries;
+
+ u8 macs[entries][ETH_ALEN];
+
+};
+
+#define VIRTIO_NET_CTRL_MAC 1
+
+ #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
+
+The device can filter incoming packets by any number of
+destination MAC addresses.[footnote:
+Since there are no guarantees, it can use a hash filter
+or silently switch to allmulti or promiscuous mode if it is given
+too many addresses.
+] This table is set using the class VIRTIO_NET_CTRL_MAC and the
+command VIRTIO_NET_CTRL_MAC_TABLE_SET. The command-specific-data
+is two variable length tables of 6-byte MAC addresses. The first
+table contains unicast addresses, and the second contains
+multicast addresses.
+
+ VLAN Filtering
+
+If the driver negotiates the VIRTIO_NET_F_CTRL_VLAN feature, it
+can control a VLAN filter table in the device.
+
+#define VIRTIO_NET_CTRL_VLAN 2
+
+ #define VIRTIO_NET_CTRL_VLAN_ADD 0
+
+ #define VIRTIO_NET_CTRL_VLAN_DEL 1
+
+Both the VIRTIO_NET_CTRL_VLAN_ADD and VIRTIO_NET_CTRL_VLAN_DEL
+commands take a 16-bit VLAN id as the command-specific-data.
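
To make the command framing concrete, here is a non-normative sketch
of queueing a VLAN-add command; example_add_ctrl_buffers(),
example_notify_controlq() and report_failure() stand in for the
driver's own primitives and are hypothetical:

/* Illustrative sketch only: not part of the specification. */
struct {
	u8 class;
	u8 command;
} hdr = { VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD };
u16 vid = 42;			/* example VLAN id */
u8 ack = VIRTIO_NET_ERR;

/* hdr and vid are device-readable; ack is device-writable */
example_add_ctrl_buffers(&hdr, sizeof(hdr), &vid, sizeof(vid), &ack, sizeof(ack));
example_notify_controlq();
/* on completion, little to do but diagnose a bad ack */
if (ack != VIRTIO_NET_OK)
	report_failure();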
+
+Appendix D: Block Device
+
+The virtio block device is a simple virtual block device (ie.
+disk). Read and write requests (and other exotic requests) are
+placed in the queue, and serviced (probably out of order) by the
+device except where noted.
+
+ Configuration
+
+ Subsystem Device ID 2
+
+ Virtqueues 0:requestq.
+
+ Feature bits
+
+ VIRTIO_BLK_F_BARRIER (0) Host supports request barriers.
+
+ VIRTIO_BLK_F_SIZE_MAX (1) Maximum size of any single segment is
+ in “size_max”.
+
+ VIRTIO_BLK_F_SEG_MAX (2) Maximum number of segments in a
+ request is in “seg_max”.
+
+ VIRTIO_BLK_F_GEOMETRY (4) Disk-style geometry specified in “
+ geometry”.
+
+ VIRTIO_BLK_F_RO (5) Device is read-only.
+
+ VIRTIO_BLK_F_BLK_SIZE (6) Block size of disk is in “blk_size”.
+
+ VIRTIO_BLK_F_SCSI (7) Device supports scsi packet commands.
+
+ VIRTIO_BLK_F_FLUSH (9) Cache flush command support.
+
+ Device configuration layout The capacity of the device
+ (expressed in 512-byte sectors) is always present. The
+ availability of the others depends on the various feature bits
+ as indicated above.
+
+struct virtio_blk_config {
+
+ u64 capacity;
+
+ u32 size_max;
+
+ u32 seg_max;
+
+ struct virtio_blk_geometry {
+
+ u16 cylinders;
+
+ u8 heads;
+
+ u8 sectors;
+
+ } geometry;
+
+ u32 blk_size;
+
+};
+
+ Device Initialization
+
+ The device size should be read from the “capacity”
+ configuration field. No requests should be submitted which go
+ beyond this limit.
+
+ If the VIRTIO_BLK_F_BLK_SIZE feature is negotiated, the
+ blk_size field can be read to determine the optimal sector size
+ for the driver to use. This does not affect the units used in
+ the protocol (always 512 bytes), but awareness of the correct
+ value can affect performance.
+
+ If the VIRTIO_BLK_F_RO feature is set by the device, any write
+ requests will fail.
+
+ Device Operation
+
+The driver queues requests to the virtqueue, and they are used by
+the device (not necessarily in order). Each request is of the form:
+
+struct virtio_blk_req {
+
+ u32 type;
+
+ u32 ioprio;
+
+ u64 sector;
+
+ char data[][512];
+
+ u8 status;
+
+};
+
+If the device has the VIRTIO_BLK_F_SCSI feature, it can also support
+scsi packet command requests, each of these requests is of the form:
+
+struct virtio_scsi_pc_req {
+
+ u32 type;
+
+ u32 ioprio;
+
+ u64 sector;
+
+ char cmd[];
+
+ char data[][512];
+
+#define SCSI_SENSE_BUFFERSIZE 96
+
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+
+ u32 errors;
+
+ u32 data_len;
+
+ u32 sense_len;
+
+ u32 residual;
+
+ u8 status;
+
+};
+
+The type of the request is either a read (VIRTIO_BLK_T_IN), a
+write (VIRTIO_BLK_T_OUT), a scsi packet command
+(VIRTIO_BLK_T_SCSI_CMD or VIRTIO_BLK_T_SCSI_CMD_OUT[footnote:
+the SCSI_CMD and SCSI_CMD_OUT types are equivalent, the device
+does not distinguish between them
+]) or a flush (VIRTIO_BLK_T_FLUSH or VIRTIO_BLK_T_FLUSH_OUT[footnote:
+the FLUSH and FLUSH_OUT types are equivalent, the device does not
+distinguish between them
+]). If the device has the VIRTIO_BLK_F_BARRIER feature the high bit
+(VIRTIO_BLK_T_BARRIER) indicates that this request acts as a
+barrier and that all preceding requests must be complete before
+this one, and all following requests must not be started until
+this is complete. Note that a barrier does not flush caches in
+the underlying backend device in host, and thus does not serve as
+a data consistency guarantee. The driver must use a FLUSH request
+to flush the host cache.
+
+#define VIRTIO_BLK_T_IN 0
+
+#define VIRTIO_BLK_T_OUT 1
+
+#define VIRTIO_BLK_T_SCSI_CMD 2
+
+#define VIRTIO_BLK_T_SCSI_CMD_OUT 3
+
+#define VIRTIO_BLK_T_FLUSH 4
+
+#define VIRTIO_BLK_T_FLUSH_OUT 5
+
+#define VIRTIO_BLK_T_BARRIER 0x80000000
+
+The ioprio field is a hint about the relative priorities of
+requests to the device: higher numbers indicate more important
+requests.
+
+The sector number indicates the offset (multiplied by 512) where
+the read or write is to occur. This field is unused and set to 0
+for scsi packet commands and for flush commands.
+
+The cmd field is only present for scsi packet command requests,
+and indicates the command to perform. This field must reside in a
+single, separate read-only buffer; command length can be derived
+from the length of this buffer.
+
+Note that these first three (four for scsi packet commands)
+fields are always read-only: the data field is either read-only
+or write-only, depending on the request. The size of the read or
+write can be derived from the total size of the request buffers.
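
As a non-normative illustration, a driver could queue a 4096-byte read
of sector 0 as one device-readable header buffer followed by
device-writable data and status buffers; the helper names are
hypothetical:

/* Illustrative sketch only: not part of the specification. */
struct {
	u32 type;
	u32 ioprio;
	u64 sector;
} hdr = { VIRTIO_BLK_T_IN, 0, 0 };
u8 data[8 * 512];		/* filled in by the device */
u8 status;			/* written by the device (status values below) */

/* hdr is device-readable; data and status are device-writable */
example_add_request_buffers(&hdr, sizeof(hdr), data, sizeof(data), &status, 1);
example_notify_requestq();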
+
+The sense field is only present for scsi packet command requests,
+and indicates the buffer for scsi sense data.
+
+The data_len field is only present for scsi packet command
+requests; it is deprecated and should be ignored by the
+driver. Historically, devices copied data length there.
+
+The sense_len field is only present for scsi packet command
+requests and indicates the number of bytes actually written to
+the sense buffer.
+
+The residual field is only present for scsi packet command
+requests and indicates the residual size, calculated as data
+length - number of bytes actually transferred.
+
+The final status byte is written by the device: either
+VIRTIO_BLK_S_OK for success, VIRTIO_BLK_S_IOERR for host or guest
+error or VIRTIO_BLK_S_UNSUPP for a request unsupported by host:
+
+#define VIRTIO_BLK_S_OK 0
+
+#define VIRTIO_BLK_S_IOERR 1
+
+#define VIRTIO_BLK_S_UNSUPP 2
+
+Historically, devices assumed that the fields type, ioprio and
+sector reside in a single, separate read-only buffer; the fields
+errors, data_len, sense_len and residual reside in a single,
+separate write-only buffer; the sense field in a separate
+write-only buffer of size 96 bytes, by itself; and the status
+field is a separate write-only buffer of size 1 byte, by itself.
+
+Appendix E: Console Device
+
+The virtio console device is a simple device for data input and
+output. A device may have one or more ports. Each port has a pair
+of input and output virtqueues. Moreover, a device has a pair of
+control IO virtqueues. The control virtqueues are used to
+communicate information between the device and the driver about
+ports being opened and closed on either side of the connection,
+indication from the host about whether a particular port is a
+console port, adding new ports, port hot-plug/unplug, etc., and
+indication from the guest about whether a port or a device was
+successfully added, port open/close, etc. For data IO, one or
+more empty buffers are placed in the receive queue for incoming
+data and outgoing characters are placed in the transmit queue.
+
+ Configuration
+
+ Subsystem Device ID 3
+
+ Virtqueues 0:receiveq(port0). 1:transmitq(port0), 2:control
+ receiveq[footnote:
+Ports 2 onwards only if VIRTIO_CONSOLE_F_MULTIPORT is set
+], 3:control transmitq, 4:receiveq(port1), 5:transmitq(port1),
+ ...
+
+ Feature bits
+
+ VIRTIO_CONSOLE_F_SIZE (0) Configuration cols and rows fields
+ are valid.
+
+ VIRTIO_CONSOLE_F_MULTIPORT(1) Device has support for multiple
+ ports; configuration fields nr_ports and max_nr_ports are
+ valid and control virtqueues will be used.
+
+ Device configuration layout The size of the console is supplied
+ in the configuration space if the VIRTIO_CONSOLE_F_SIZE feature
+ is set. Furthermore, if the VIRTIO_CONSOLE_F_MULTIPORT feature
+ is set, the maximum number of ports supported by the device can
+ be fetched.
+
+struct virtio_console_config {
+
+ u16 cols;
+
+ u16 rows;
+
+ u32 max_nr_ports;
+
+};
+
+ Device Initialization
+
+ If the VIRTIO_CONSOLE_F_SIZE feature is negotiated, the driver
+ can read the console dimensions from the configuration fields.
+
+ If the VIRTIO_CONSOLE_F_MULTIPORT feature is negotiated, the
+ driver can spawn multiple ports, not all of which may be
+ attached to a console. Some could be generic ports. In this
+ case, the control virtqueues are enabled and according to the
+ max_nr_ports configuration-space value, the appropriate number
+ of virtqueues are created. A control message indicating the
+ driver is ready is sent to the host.
+ The host can then send
+ control messages for adding new ports to the device. After
+ creating and initializing each port, a
+ VIRTIO_CONSOLE_PORT_READY control message is sent to the host
+ for that port so the host can let us know of any additional
+ configuration options set for that port.
+
+ The receiveq for each port is populated with one or more
+ receive buffers.
+
+ Device Operation
+
+ For output, a buffer containing the characters is placed in the
+ port's transmitq.[footnote:
+Because this is high importance and low bandwidth, the current
+Linux implementation polls for the buffer to be used, rather than
+waiting for an interrupt, simplifying the implementation
+significantly. However, for generic serial ports with the
+O_NONBLOCK flag set, the polling limitation is relaxed and the
+consumed buffers are freed upon the next write or poll call or
+when a port is closed or hot-unplugged.
+]
+
+ When a buffer is used in the receiveq (signalled by an
+ interrupt), the contents are the input to the port associated
+ with the virtqueue for which the notification was received.
+
+ If the driver negotiated the VIRTIO_CONSOLE_F_SIZE feature, a
+ configuration change interrupt may occur. The updated size can
+ be read from the configuration fields.
+
+ If the driver negotiated the VIRTIO_CONSOLE_F_MULTIPORT
+ feature, active ports are announced by the host using the
+ VIRTIO_CONSOLE_PORT_ADD control message. The same message is
+ used for port hot-plug as well.
+
+ If the host specified a port `name', a sysfs attribute is
+ created with the name filled in, so that udev rules can be
+ written that can create a symlink from the port's name to the
+ char device for port discovery by applications in the guest.
+
+ Changes to ports' state are effected by control messages.
+ Appropriate action is taken on the port indicated in the
+ control message. The layout of the control buffer structure
+ and the associated events are:
+
+struct virtio_console_control {
+
+ uint32_t id; /* Port number */
+
+ uint16_t event; /* The kind of control event */
+
+ uint16_t value; /* Extra information for the event */
+
+};
+
+/* Some events for the internal messages (control packets) */
+
+#define VIRTIO_CONSOLE_DEVICE_READY 0
+
+#define VIRTIO_CONSOLE_PORT_ADD 1
+
+#define VIRTIO_CONSOLE_PORT_REMOVE 2
+
+#define VIRTIO_CONSOLE_PORT_READY 3
+
+#define VIRTIO_CONSOLE_CONSOLE_PORT 4
+
+#define VIRTIO_CONSOLE_RESIZE 5
+
+#define VIRTIO_CONSOLE_PORT_OPEN 6
+
+#define VIRTIO_CONSOLE_PORT_NAME 7
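
A non-normative sketch of a driver-side handler for buffers used on
the control receiveq; the per-event helpers are hypothetical:

/* Illustrative sketch only: not part of the specification. */
static void example_handle_control(struct virtio_console_control *cpkt)
{
	switch (cpkt->event) {
	case VIRTIO_CONSOLE_PORT_ADD:
		add_port(cpkt->id);	/* create the port, then send VIRTIO_CONSOLE_PORT_READY */
		break;
	case VIRTIO_CONSOLE_PORT_REMOVE:
		remove_port(cpkt->id);
		break;
	case VIRTIO_CONSOLE_CONSOLE_PORT:
		if (cpkt->value)
			mark_port_as_console(cpkt->id);
		break;
	case VIRTIO_CONSOLE_PORT_OPEN:
		set_port_open(cpkt->id, cpkt->value);
		break;
	default:		/* unknown events are ignored */
		break;
	}
}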
+
+Appendix F: Entropy Device
+
+The virtio entropy device supplies high-quality randomness for
+guest use.
+
+ Configuration
+
+ Subsystem Device ID 4
+
+ Virtqueues 0:requestq.
+
+ Feature bits None currently defined
+
+ Device configuration layout None currently defined.
+
+ Device Initialization
+
+ The virtqueue is initialized.
+
+ Device Operation
+
+When the driver requires random bytes, it places the descriptor
+of one or more buffers in the queue. They will be completely
+filled with random data by the device.
+
+Appendix G: Memory Balloon Device
+
+The virtio memory balloon device is a primitive device for
+managing guest memory: the device asks for a certain amount of
+memory, and the guest supplies it (or withdraws it, if the device
+has more than it asks for). This allows the guest to adapt to
+changes in allowance of underlying physical memory. If the
+feature is negotiated, the device can also be used to communicate
+guest memory statistics to the host.
+
+ Configuration
+
+ Subsystem Device ID 5
+
+ Virtqueues 0:inflateq. 1:deflateq. 2:statsq.[footnote:
+Only if VIRTIO_BALLOON_F_STATS_VQ set
+]
+
+ Feature bits
+
+ VIRTIO_BALLOON_F_MUST_TELL_HOST (0) Host must be told before
+ pages from the balloon are used.
+
+ VIRTIO_BALLOON_F_STATS_VQ (1) A virtqueue for reporting guest
+ memory statistics is present.
+
+ Device configuration layout Both fields of this configuration
+ are always available. Note that they are little endian, despite
+ the convention that device fields are guest endian:
+
+struct virtio_balloon_config {
+
+ u32 num_pages;
+
+ u32 actual;
+
+};
+
+ Device Initialization
+
+ The inflate and deflate virtqueues are identified.
+
+ If the VIRTIO_BALLOON_F_STATS_VQ feature bit is negotiated:
+
+ Identify the stats virtqueue.
+
+ Add one empty buffer to the stats virtqueue and notify the
+ host.
+
+Device operation begins immediately.
+
+ Device Operation
+
+ Memory Ballooning The device is driven by the receipt of a
+ configuration change interrupt.
+
+ The “num_pages” configuration field is examined. If this is
+ greater than the “actual” number of pages, memory must be given
+ to the balloon. If it is less than the “actual” number of
+ pages, memory may be taken back from the balloon for general
+ use.
+
+ To supply memory to the balloon (aka. inflate):
+
+ The driver constructs an array of addresses of unused memory
+ pages. These addresses are divided by 4096[footnote:
+This is historical, and independent of the guest page size
+] and the descriptor describing the resulting 32-bit array is
+ added to the inflateq.
+
+ To remove memory from the balloon (aka. deflate):
+
+ The driver constructs an array of addresses of memory pages it
+ has previously given to the balloon, as described above. This
+ descriptor is added to the deflateq.
+
+ If the VIRTIO_BALLOON_F_MUST_TELL_HOST feature is set, the
+ guest may not use these requested pages until that descriptor
+ in the deflateq has been used by the device.
+
+ Otherwise, the guest may begin to re-use pages previously given
+ to the balloon before the device has acknowledged their
+ withdrawal. [footnote:
+In this case, deflation advice is merely a courtesy
+]
+
+ In either case, once the device has completed the inflation or
+ deflation, the “actual” field of the configuration should be
+ updated to reflect the new number of pages in the balloon.[footnote:
+As updates to configuration space are not atomic, this field
+isn't particularly reliable, but can be used to diagnose buggy
+guests.
+]
+
+ Memory Statistics
+
+The stats virtqueue is atypical because communication is driven
+by the device (not the driver). The channel becomes active at
+driver initialization time when the driver adds an empty buffer
+and notifies the device. A request for memory statistics proceeds
+as follows:
+
+ The device pushes the buffer onto the used ring and sends an
+ interrupt.
+
+ The driver pops the used buffer and discards it.
+
+ The driver collects memory statistics and writes them into a
+ new buffer.
+
+ The driver adds the buffer to the virtqueue and notifies the
+ device.
+
+ The device pops the buffer (retaining it to initiate a
+ subsequent request) and consumes the statistics.
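
The driver's side of this exchange might look like the following
non-normative sketch, using the struct virtio_balloon_stat layout
defined below; example_collect_stats() and the virtqueue helpers are
hypothetical:

/* Illustrative sketch only: not part of the specification. */
static void example_stats_request(void)	/* run when the device returns the stats buffer */
{
	static struct virtio_balloon_stat stats[6];
	unsigned int n;

	example_pop_used_buffer();		/* discard the returned buffer */
	n = example_collect_stats(stats);	/* fill in tag/val pairs, guest endian */
	example_add_stats_buffer(stats, n * sizeof(stats[0]));
	example_notify_statsq();		/* the device retains the buffer for the next request */
}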
+
+ Memory Statistics Format Each statistic consists of a 16 bit
+ tag and a 64 bit value. Both quantities are represented in the
+ native endian of the guest. All statistics are optional and the
+ driver may choose which ones to supply. To guarantee backwards
+ compatibility, unsupported statistics should be omitted.
+
+ struct virtio_balloon_stat {
+
+#define VIRTIO_BALLOON_S_SWAP_IN 0
+
+#define VIRTIO_BALLOON_S_SWAP_OUT 1
+
+#define VIRTIO_BALLOON_S_MAJFLT 2
+
+#define VIRTIO_BALLOON_S_MINFLT 3
+
+#define VIRTIO_BALLOON_S_MEMFREE 4
+
+#define VIRTIO_BALLOON_S_MEMTOT 5
+
+ u16 tag;
+
+ u64 val;
+
+} __attribute__((packed));
+
+ Tags
+
+ VIRTIO_BALLOON_S_SWAP_IN The amount of memory that has been
+ swapped in (in bytes).
+
+ VIRTIO_BALLOON_S_SWAP_OUT The amount of memory that has been
+ swapped out to disk (in bytes).
+
+ VIRTIO_BALLOON_S_MAJFLT The number of major page faults that
+ have occurred.
+
+ VIRTIO_BALLOON_S_MINFLT The number of minor page faults that
+ have occurred.
+
+ VIRTIO_BALLOON_S_MEMFREE The amount of memory not being used
+ for any purpose (in bytes).
+
+ VIRTIO_BALLOON_S_MEMTOT The total amount of memory available
+ (in bytes).
+
-- cgit v0.10.2 From e22a539824e8ddb82c87b4f415165ede82e6ab56 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Mon, 15 Aug 2011 10:15:10 +0930 Subject: lguest: allow booting guest with CONFIG_RELOCATABLE=y The CONFIG_RELOCATABLE code tries to align the unpack destination to the value of 'kernel_alignment' in the setup_hdr. If that's 0, it tries to unpack to address 0, which in fact causes the gunzip code to call 'error("Out of memory while allocating output buffer")'. The bootloader (ie. the lguest Launcher in this case) should be setting this field; the normal bzImage is 16M, we can use the same. Reported-by: Stefanos Geraggelos Signed-off-by: Rusty Russell Cc: stable@kernel.org

diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
index 043bd7d..d928c13 100644
--- a/Documentation/virtual/lguest/lguest.c
+++ b/Documentation/virtual/lguest/lguest.c
@@ -1996,6 +1996,9 @@ int main(int argc, char *argv[])
 /* We use a simple helper to copy the arguments separated by spaces. */
 concat((char *)(boot + 1), argv+optind+2);

+ /* Set kernel alignment to 16M (CONFIG_PHYSICAL_ALIGN) */
+ boot->hdr.kernel_alignment = 0x1000000;
+
 /* Boot protocol version: 2.07 supports the fields for lguest. */
 boot->hdr.version = 0x207;

-- cgit v0.10.2 From eade7b281c9fc18401b989c77d5e5e660b25a3b7 Mon Sep 17 00:00:00 2001 From: Daniel T Chen Date: Sun, 14 Aug 2011 22:43:01 -0400 Subject: ALSA: ac97: Add HP Compaq dc5100 SFF(PT003AW) to Headphone Jack Sense whitelist BugLink: https://bugs.launchpad.net/bugs/826081 The original reporter needs 'Headphone Jack Sense' enabled to have audible audio, so add his PCI SSID to the whitelist. Reported-and-tested-by: Muhammad Khurram Khan Cc: Signed-off-by: Daniel T Chen Signed-off-by: Takashi Iwai

diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 200c9a1..a872d0a 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1909,6 +1909,7 @@ static unsigned int ad1981_jacks_whitelist[] = {
 0x103c0944, /* HP nc6220 */
 0x103c0934, /* HP nc8220 */
 0x103c006d, /* HP nx9105 */
+ 0x103c300d, /* HP Compaq dc5100 SFF(PT003AW) */
 0x17340088, /* FSC Scenic-W */
 0 /* end */
 };

-- cgit v0.10.2 From d5811e8731213f80c80d89e980505052f16aca1c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 13 Aug 2011 13:36:13 -0400 Subject: drm/radeon/kms: don't try to be smart in the hpd handler Attempting to turn off disconnected display hw in the hotplug handler led to more problems than it helped. For now just register an event and only attempt to do something interesting with DP.
Other connectors are just too problematic:
- Some systems have an HPD pin assigned to LVDS, but it's rarely if ever connected properly and we don't really care about hpd events on LVDS anyway since it's always connected.
- The HPD pin is wired up correctly for eDP, but we don't really have to do anything with the events since it's always connected.
- Some HPD pins fire more than once when you connect/disconnect
- etc.
Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=39882 Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie

diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 645b84b..7ad43c6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -613,6 +613,18 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
 return true;
 }

+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (!radeon_dp_get_link_status(radeon_connector, link_status))
+ return false;
+ if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
+ return false;
+ return true;
+}
+
 struct radeon_dp_link_train_info {
 struct radeon_device *rdev;
 struct drm_encoder *encoder;

diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 441e070..7f65940 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -64,18 +64,16 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 if (connector->dpms != DRM_MODE_DPMS_ON)
 return;

- /* powering up/down the eDP panel generates hpd events which
- * can interfere with modesetting.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- return;
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ int saved_dpms = connector->dpms;

- /* pre-r600 did not always have the hpd pins mapped accurately to connectors */
- if (rdev->family >= CHIP_R600) {
- if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+ if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
+ radeon_dp_needs_link_train(radeon_connector))
 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 else
 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ connector->dpms = saved_dpms;
 }
 }

diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index d09031c..68820f5 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -479,6 +479,7 @@ extern void radeon_dp_set_link_config(struct drm_connector *connector,
 struct drm_display_mode *mode);
 extern void radeon_dp_link_train(struct drm_encoder *encoder,
 struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
 extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
 extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
 extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);

-- cgit v0.10.2 From 75f25bd31d9315ab57e4fb5eba3340452febc48d Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Wed, 3 Aug 2011 13:17:01 +0800 Subject: cpupower: avoid using symlinks Reference the source directly, don't create symlinks.
Signed-off-by: WANG Cong Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile index dbf1399..3326217 100644 --- a/tools/power/cpupower/debug/x86_64/Makefile +++ b/tools/power/cpupower/debug/x86_64/Makefile @@ -1,10 +1,10 @@ default: all -centrino-decode: centrino-decode.c - $(CC) $(CFLAGS) -o centrino-decode centrino-decode.c +centrino-decode: ../i386/centrino-decode.c + $(CC) $(CFLAGS) -o $@ $< -powernow-k8-decode: powernow-k8-decode.c - $(CC) $(CFLAGS) -o powernow-k8-decode powernow-k8-decode.c +powernow-k8-decode: ../i386/powernow-k8-decode.c + $(CC) $(CFLAGS) -o $@ $< all: centrino-decode powernow-k8-decode diff --git a/tools/power/cpupower/debug/x86_64/centrino-decode.c b/tools/power/cpupower/debug/x86_64/centrino-decode.c deleted file mode 120000 index 26fb3f1..0000000 --- a/tools/power/cpupower/debug/x86_64/centrino-decode.c +++ /dev/null @@ -1 +0,0 @@ -../i386/centrino-decode.c \ No newline at end of file diff --git a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c b/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c deleted file mode 120000 index eb30c79..0000000 --- a/tools/power/cpupower/debug/x86_64/powernow-k8-decode.c +++ /dev/null @@ -1 +0,0 @@ -../i386/powernow-k8-decode.c \ No newline at end of file -- cgit v0.10.2 From 2dfc818b35cbea59188cc86e86e0a0efce2b0dbe Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Fri, 12 Aug 2011 01:11:35 +0200 Subject: cpupower: mperf monitor - Use TSC to calculate max frequency if possible Which makes the implementation independent from cpufreq drivers. Therefore this would also work on a Xen kernel where the hypervisor is doing frequency switching and idle entering. Signed-off-by: Thomas Renninger Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 94c2cf0..11521d2 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -24,7 +24,7 @@ # Set the following to `true' to make a unstripped, unoptimized # binary. Leave this set to `false' for production use. -DEBUG ?= false +DEBUG ?= true # make the build silent. Set this to something else to make it noisy again. V ?= false diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c index 63ca87a..5650ab5 100644 --- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c @@ -22,12 +22,15 @@ #define MSR_TSC 0x10 +#define MSR_AMD_HWCR 0xc0010015 + enum mperf_id { C0 = 0, Cx, AVG_FREQ, MPERF_CSTATE_COUNT }; static int mperf_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static int mperf_get_count_freq(unsigned int id, unsigned long long *count, unsigned int cpu); +static struct timespec time_start, time_end; static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { { @@ -54,19 +57,33 @@ static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { }, }; +enum MAX_FREQ_MODE { MAX_FREQ_SYSFS, MAX_FREQ_TSC_REF }; +static int max_freq_mode; +/* + * The max frequency mperf is ticking at (in C0), either retrieved via: + * 1) calculated after measurements if we know TSC ticks at mperf/P0 frequency + * 2) cpufreq /sys/devices/.../cpu0/cpufreq/cpuinfo_max_freq at init time + * 1. Is preferred as it also works without cpufreq subsystem (e.g. 
on Xen) + */ +static unsigned long max_frequency; + static unsigned long long tsc_at_measure_start; static unsigned long long tsc_at_measure_end; -static unsigned long max_frequency; static unsigned long long *mperf_previous_count; static unsigned long long *aperf_previous_count; static unsigned long long *mperf_current_count; static unsigned long long *aperf_current_count; + /* valid flag for all CPUs. If a MSR read failed it will be zero */ static int *is_valid; static int mperf_get_tsc(unsigned long long *tsc) { - return read_msr(0, MSR_TSC, tsc); + int ret; + ret = read_msr(0, MSR_TSC, tsc); + if (ret) + dprint("Reading TSC MSR failed, returning %llu\n", *tsc); + return ret; } static int mperf_init_stats(unsigned int cpu) @@ -97,36 +114,11 @@ static int mperf_measure_stats(unsigned int cpu) return 0; } -/* - * get_average_perf() - * - * Returns the average performance (also considers boosted frequencies) - * - * Input: - * aperf_diff: Difference of the aperf register over a time period - * mperf_diff: Difference of the mperf register over the same time period - * max_freq: Maximum frequency (P0) - * - * Returns: - * Average performance over the time period - */ -static unsigned long get_average_perf(unsigned long long aperf_diff, - unsigned long long mperf_diff) -{ - unsigned int perf_percent = 0; - if (((unsigned long)(-1) / 100) < aperf_diff) { - int shift_count = 7; - aperf_diff >>= shift_count; - mperf_diff >>= shift_count; - } - perf_percent = (aperf_diff * 100) / mperf_diff; - return (max_frequency * perf_percent) / 100; -} - static int mperf_get_count_percent(unsigned int id, double *percent, unsigned int cpu) { unsigned long long aperf_diff, mperf_diff, tsc_diff; + unsigned long long timediff; if (!is_valid[cpu]) return -1; @@ -136,11 +128,19 @@ static int mperf_get_count_percent(unsigned int id, double *percent, mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; - tsc_diff = tsc_at_measure_end - tsc_at_measure_start; - *percent = 100.0 * mperf_diff / tsc_diff; - dprint("%s: mperf_diff: %llu, tsc_diff: %llu\n", - mperf_cstates[id].name, mperf_diff, tsc_diff); + if (max_freq_mode == MAX_FREQ_TSC_REF) { + tsc_diff = tsc_at_measure_end - tsc_at_measure_start; + *percent = 100.0 * mperf_diff / tsc_diff; + dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n", + mperf_cstates[id].name, mperf_diff, tsc_diff); + } else if (max_freq_mode == MAX_FREQ_SYSFS) { + timediff = timespec_diff_us(time_start, time_end); + *percent = 100.0 * mperf_diff / timediff; + dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n", + mperf_cstates[id].name, mperf_diff, timediff); + } else + return -1; if (id == Cx) *percent = 100.0 - *percent; @@ -154,7 +154,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent, static int mperf_get_count_freq(unsigned int id, unsigned long long *count, unsigned int cpu) { - unsigned long long aperf_diff, mperf_diff; + unsigned long long aperf_diff, mperf_diff, time_diff, tsc_diff; if (id != AVG_FREQ) return 1; @@ -165,11 +165,21 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count, mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu]; aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu]; - /* Return MHz for now, might want to return KHz if column width is more - generic */ - *count = get_average_perf(aperf_diff, mperf_diff) / 1000; - dprint("%s: %llu\n", mperf_cstates[id].name, *count); + if (max_freq_mode == 
MAX_FREQ_TSC_REF) { + /* Calculate max_freq from TSC count */ + tsc_diff = tsc_at_measure_end - tsc_at_measure_start; + time_diff = timespec_diff_us(time_start, time_end); + max_frequency = tsc_diff / time_diff; + } + *count = max_frequency * ((double)aperf_diff / mperf_diff); + dprint("%s: Average freq based on %s maximum frequency:\n", + mperf_cstates[id].name, + (max_freq_mode == MAX_FREQ_TSC_REF) ? "TSC calculated" : "sysfs read"); + dprint("%max_frequency: %lu", max_frequency); + dprint("aperf_diff: %llu\n", aperf_diff); + dprint("mperf_diff: %llu\n", mperf_diff); + dprint("avg freq: %llu\n", *count); return 0; } @@ -178,6 +188,7 @@ static int mperf_start(void) int cpu; unsigned long long dbg; + clock_gettime(CLOCK_REALTIME, &time_start); mperf_get_tsc(&tsc_at_measure_start); for (cpu = 0; cpu < cpu_count; cpu++) @@ -193,32 +204,104 @@ static int mperf_stop(void) unsigned long long dbg; int cpu; - mperf_get_tsc(&tsc_at_measure_end); - for (cpu = 0; cpu < cpu_count; cpu++) mperf_measure_stats(cpu); + mperf_get_tsc(&tsc_at_measure_end); + clock_gettime(CLOCK_REALTIME, &time_end); + mperf_get_tsc(&dbg); dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end); return 0; } -struct cpuidle_monitor mperf_monitor; - -struct cpuidle_monitor *mperf_register(void) +/* + * Mperf register is defined to tick at P0 (maximum) frequency + * + * Instead of reading out P0 which can be tricky to read out from HW, + * we use TSC counter if it reliably ticks at P0/mperf frequency. + * + * Still try to fall back to: + * /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq + * on older Intel HW without invariant TSC feature. + * Or on AMD machines where TSC does not tick at P0 (do not exist yet, but + * it's still double checked (MSR_AMD_HWCR)). + * + * On these machines the user would still get useful mperf + * stats when acpi-cpufreq driver is loaded. + */ +static int init_maxfreq_mode(void) { + int ret; + unsigned long long hwcr; unsigned long min; - if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) - return NULL; - - /* Assume min/max all the same on all cores */ + if (!cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC) + goto use_sysfs; + + if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) { + /* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf + * freq. + * A test whether hwcr is accessable/available would be: + * (cpupower_cpu_info.family > 0x10 || + * cpupower_cpu_info.family == 0x10 && + * cpupower_cpu_info.model >= 0x2)) + * This should be the case for all aperf/mperf + * capable AMD machines and is therefore safe to test here. 
+ * Compare with Linus kernel git commit: acf01734b1747b1ec4 + */ + ret = read_msr(0, MSR_AMD_HWCR, &hwcr); + /* + * If the MSR read failed, assume a Xen system that did + * not explicitly provide access to it and assume TSC works + */ + if (ret != 0) { + dprint("TSC read 0x%x failed - assume TSC working\n", + MSR_AMD_HWCR); + return 0; + } else if (1 & (hwcr >> 24)) { + max_freq_mode = MAX_FREQ_TSC_REF; + return 0; + } else { /* Use sysfs max frequency if available */ } + } else if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL) { + /* + * On Intel we assume mperf (in C0) is ticking at same + * rate than TSC + */ + max_freq_mode = MAX_FREQ_TSC_REF; + return 0; + } +use_sysfs: if (cpufreq_get_hardware_limits(0, &min, &max_frequency)) { dprint("Cannot retrieve max freq from cpufreq kernel " "subsystem\n"); - return NULL; + return -1; } + max_freq_mode = MAX_FREQ_SYSFS; + return 0; +} + +/* + * This monitor provides: + * + * 1) Average frequency a CPU resided in + * This always works if the CPU has aperf/mperf capabilities + * + * 2) C0 and Cx (any sleep state) time a CPU resided in + * Works if mperf timer stops ticking in sleep states which + * seem to be the case on all current HW. + * Both is directly retrieved from HW registers and is independent + * from kernel statistics. + */ +struct cpuidle_monitor mperf_monitor; +struct cpuidle_monitor *mperf_register(void) +{ + if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)) + return NULL; + + if (init_maxfreq_mode()) + return NULL; /* Free this at program termination */ is_valid = calloc(cpu_count, sizeof(int)); -- cgit v0.10.2 From 88f984e0e235f82a5d34f4a99244eeb14e1413e0 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Fri, 12 Aug 2011 01:11:36 +0200 Subject: cpupower: Do not show an empty Idle_Stats monitor if no idle driver is available By taking error values of: sysfs_get_idlestate_count(..); into account. Signed-off-by: Thomas Renninger Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c index d048b96..bcd22a1 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c @@ -134,7 +134,7 @@ static struct cpuidle_monitor *cpuidle_register(void) /* Assume idle state count is the same for all CPUs */ cpuidle_sysfs_monitor.hw_states_num = sysfs_get_idlestate_count(0); - if (cpuidle_sysfs_monitor.hw_states_num == 0) + if (cpuidle_sysfs_monitor.hw_states_num <= 0) return NULL; for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) { -- cgit v0.10.2 From 7c74d2bc5a9d43d33d6f16c1e706147162e2bc52 Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Fri, 12 Aug 2011 01:11:37 +0200 Subject: cpupower: Better detect offlined CPUs Before, checking for offlined CPUs was done dirty and it was checked whether topology parsing returned -1 values. But this is a valid case on a Xen (and possibly other) kernels. Do proper online/offline checking, also take CONFIG_HOTPLUG_CPU option into account (no /sys/devices/../cpuX/online file). 
Signed-off-by: Thomas Renninger Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 592ee36..7a83022 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h @@ -96,6 +96,9 @@ struct cpupower_topology { int pkg; int core; int cpu; + + /* flags */ + unsigned int is_online:1; } *core_info; }; diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c index 55e2466..c634302 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.c +++ b/tools/power/cpupower/utils/helpers/sysfs.c @@ -56,6 +56,56 @@ static unsigned int sysfs_write_file(const char *path, return (unsigned int) numwrite; } +/* + * Detect whether a CPU is online + * + * Returns: + * 1 -> if CPU is online + * 0 -> if CPU is offline + * negative errno values in error case + */ +int sysfs_is_cpu_online(unsigned int cpu) +{ + char path[SYSFS_PATH_MAX]; + int fd; + ssize_t numread; + unsigned long long value; + char linebuf[MAX_LINE_LEN]; + char *endp; + struct stat statbuf; + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u", cpu); + + if (stat(path, &statbuf) != 0) + return 0; + + /* + * kernel without CONFIG_HOTPLUG_CPU + * -> cpuX directory exists, but not cpuX/online file + */ + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/online", cpu); + if (stat(path, &statbuf) != 0) + return 1; + + fd = open(path, O_RDONLY); + if (fd == -1) + return -errno; + + numread = read(fd, linebuf, MAX_LINE_LEN - 1); + if (numread < 1) { + close(fd); + return -EIO; + } + linebuf[numread] = '\0'; + close(fd); + + value = strtoull(linebuf, &endp, 0); + if (value > 1 || value < 0) + return -EINVAL; + + return value; +} + /* CPUidle idlestate specific /sys/devices/system/cpu/cpuX/cpuidle/ access */ /* diff --git a/tools/power/cpupower/utils/helpers/sysfs.h b/tools/power/cpupower/utils/helpers/sysfs.h index f9373e0..8cb797b 100644 --- a/tools/power/cpupower/utils/helpers/sysfs.h +++ b/tools/power/cpupower/utils/helpers/sysfs.h @@ -7,6 +7,8 @@ extern unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); +extern int sysfs_is_cpu_online(unsigned int cpu); + extern unsigned long sysfs_get_idlestate_latency(unsigned int cpu, unsigned int idlestate); extern unsigned long sysfs_get_idlestate_usage(unsigned int cpu, diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c index 385ee5c..4eae2c4 100644 --- a/tools/power/cpupower/utils/helpers/topology.c +++ b/tools/power/cpupower/utils/helpers/topology.c @@ -41,6 +41,8 @@ struct cpuid_core_info { unsigned int pkg; unsigned int thread; unsigned int cpu; + /* flags */ + unsigned int is_online:1; }; static int __compare(const void *t1, const void *t2) @@ -78,6 +80,8 @@ int get_cpu_topology(struct cpupower_topology *cpu_top) return -ENOMEM; cpu_top->pkgs = cpu_top->cores = 0; for (cpu = 0; cpu < cpus; cpu++) { + cpu_top->core_info[cpu].cpu = cpu; + cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu); cpu_top->core_info[cpu].pkg = sysfs_topology_read_file(cpu, "physical_package_id"); if ((int)cpu_top->core_info[cpu].pkg != -1 && @@ -85,7 +89,6 @@ int get_cpu_topology(struct cpupower_topology *cpu_top) cpu_top->pkgs = cpu_top->core_info[cpu].pkg; cpu_top->core_info[cpu].core = sysfs_topology_read_file(cpu, "core_id"); - cpu_top->core_info[cpu].cpu = cpu; } cpu_top->pkgs++; diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c 
b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index ba4bf06..dd8e1ea 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c @@ -190,9 +190,13 @@ void print_results(int topology_depth, int cpu) } } } - /* cpu offline */ - if (cpu_top.core_info[cpu].pkg == -1 || - cpu_top.core_info[cpu].core == -1) { + /* + * The monitor could still provide useful data, for example + * AMD HW counters partly sit in PCI config space. + * It's up to the monitor plug-in to check .is_online, this one + * is just for additional info. + */ + if (!cpu_top.core_info[cpu].is_online) { printf(_(" *is offline\n")); return; } else -- cgit v0.10.2 From 9ee31f618a3c8209b2bd4bedd71fd5f2be7786bd Mon Sep 17 00:00:00 2001 From: Thomas Renninger Date: Fri, 12 Aug 2011 01:11:38 +0200 Subject: cpupower: Make monitor command -c/--cpu aware This allows for example: cpupower -c 2-4,6 monitor -m Mperf |Mperf PKG |CORE|CPU | C0 | Cx | Freq 0| 8| 4| 2.42| 97.58| 1353 0| 16| 2| 14.38| 85.62| 1928 0| 24| 6| 1.76| 98.24| 1442 1| 16| 3| 15.53| 84.47| 1650 CPUs always get resorted for package, core then cpu id if it could get read out (or however you name these topology levels...). Still this is a nice way to keep the overview if a test binary is bound to a specific CPU or if one wants to show all CPUs inside a package or similar. Still missing: Do not measure not available cores to reduce the overhead and achieve better results. Signed-off-by: Thomas Renninger Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index dd8e1ea..6cb8d9e 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c @@ -149,6 +149,10 @@ void print_results(int topology_depth, int cpu) unsigned long long result; cstate_t s; + /* Be careful CPUs may got resorted for pkg value do not just use cpu */ + if (!bitmask_isbitset(cpus_chosen, cpu_top.core_info[cpu].cpu)) + return; + if (topology_depth > 2) printf("%4d|", cpu_top.core_info[cpu].pkg); if (topology_depth > 1) @@ -389,6 +393,10 @@ int cmd_monitor(int argc, char **argv) return EXIT_FAILURE; } + /* Default is: monitor all CPUs */ + if (bitmask_isallclear(cpus_chosen)) + bitmask_setall(cpus_chosen); + dprint("System has up to %d CPU cores\n", cpu_count); for (num = 0; all_monitors[num]; num++) { -- cgit v0.10.2 From aaa6fd2a004147bf32fce05720938236de3361d9 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 12 Aug 2011 12:11:33 +0200 Subject: Not all systems expose a firmware or platform mechanism for changing the backlight intensity on i915, so add native driver support. 
Signed-off-by: Matthew Garrett Cc: Richard Purdie Cc: Chris Wilson Cc: David Airlie Cc: Alex Deucher Cc: Ben Skeggs Cc: Zhang Rui Cc: Len Brown Cc: Jesse Barnes Tested-by: Sedat Dilek Tested-by: Michel Alexandre Salim Tested-by: Kamal Mostafa Signed-off-by: Andrew Morton Signed-off-by: Keith Packard diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index feb4f16..7916bd9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -36,6 +36,7 @@ #include #include #include +#include /* General customization: */ @@ -690,6 +691,7 @@ typedef struct drm_i915_private { int child_dev_num; struct child_device_config *child_dev; struct drm_connector *int_lvds_connector; + struct drm_connector *int_edp_connector; bool mchbar_need_disable; @@ -723,6 +725,8 @@ typedef struct drm_i915_private { /* list of fbdev register on this device */ struct intel_fbdev *fbdev; + struct backlight_device *backlight; + struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0feae90..44fef5e 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1841,6 +1841,11 @@ done: static void intel_dp_destroy (struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + + if (intel_dpd_is_edp(dev)) + intel_panel_destroy_backlight(dev); + drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); kfree(connector); @@ -2072,6 +2077,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) DRM_MODE_TYPE_PREFERRED; } } + dev_priv->int_edp_connector = connector; + intel_panel_setup_backlight(dev); } intel_dp_add_properties(intel_dp, connector); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7b330e7..0b2ee9d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -297,9 +297,10 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, extern u32 intel_panel_get_max_backlight(struct drm_device *dev); extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); -extern void intel_panel_setup_backlight(struct drm_device *dev); +extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev); extern void intel_panel_disable_backlight(struct drm_device *dev); +extern void intel_panel_destroy_backlight(struct drm_device *dev); extern enum drm_connector_status intel_panel_detect(struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8b521a2..31da77f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -552,6 +552,8 @@ static void intel_lvds_destroy(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; + intel_panel_destroy_backlight(dev); + if (dev_priv->lid_notifier.notifier_call) acpi_lid_notifier_unregister(&dev_priv->lid_notifier); drm_sysfs_connector_remove(connector); @@ -1032,6 +1034,9 @@ out: /* keep the LVDS connector */ dev_priv->int_lvds_connector = connector; drm_sysfs_connector_add(connector); + + intel_panel_setup_backlight(dev); + return true; failed: diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 
b7c5ddb..b8e8158b 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -227,7 +227,6 @@ void intel_opregion_asle_intr(struct drm_device *dev) asle->aslc = asle_stat; } -/* Only present on Ironlake+ */ void intel_opregion_gse_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 05f500c..a9e0c7b 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -277,7 +277,7 @@ void intel_panel_enable_backlight(struct drm_device *dev) dev_priv->backlight_enabled = true; } -void intel_panel_setup_backlight(struct drm_device *dev) +static void intel_panel_init_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -309,3 +309,73 @@ intel_panel_detect(struct drm_device *dev) return connector_status_unknown; } + +#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE +static int intel_panel_update_status(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + intel_panel_set_backlight(dev, bd->props.brightness); + return 0; +} + +static int intel_panel_get_brightness(struct backlight_device *bd) +{ + struct drm_device *dev = bl_get_data(bd); + return intel_panel_get_backlight(dev); +} + +static const struct backlight_ops intel_panel_bl_ops = { + .update_status = intel_panel_update_status, + .get_brightness = intel_panel_get_brightness, +}; + +int intel_panel_setup_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct backlight_properties props; + struct drm_connector *connector; + + intel_panel_init_backlight(dev); + + if (dev_priv->int_lvds_connector) + connector = dev_priv->int_lvds_connector; + else if (dev_priv->int_edp_connector) + connector = dev_priv->int_edp_connector; + else + return -ENODEV; + + props.type = BACKLIGHT_RAW; + props.max_brightness = intel_panel_get_max_backlight(dev); + dev_priv->backlight = + backlight_device_register("intel_backlight", + &connector->kdev, dev, + &intel_panel_bl_ops, &props); + + if (IS_ERR(dev_priv->backlight)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(dev_priv->backlight)); + dev_priv->backlight = NULL; + return -ENODEV; + } + dev_priv->backlight->props.brightness = intel_panel_get_backlight(dev); + return 0; +} + +void intel_panel_destroy_backlight(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (dev_priv->backlight) + backlight_device_unregister(dev_priv->backlight); +} +#else +int intel_panel_setup_backlight(struct drm_device *dev) +{ + intel_panel_init_backlight(dev); + return 0; +} + +void intel_panel_destroy_backlight(struct drm_device *dev) +{ + return; +} +#endif -- cgit v0.10.2 From c3613de92ebea302137d21d8938421c3f88d8741 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Fri, 12 Aug 2011 17:05:54 -0700 Subject: drm/i915: Can't do accurate vblank timestamps with UMS Disable this feature when KMS is not running by setting the driver->get_vblank_timestamp function pointer to NULL. Signed-off-by: Keith Packard Tested-by: Justin P. 
Mattock diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 02f96fd..9cbb0cd 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2058,8 +2058,10 @@ void intel_irq_init(struct drm_device *dev) dev->driver->get_vblank_counter = gm45_get_vblank_counter; } - - dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; + else + dev->driver->get_vblank_timestamp = NULL; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; if (IS_IVYBRIDGE(dev)) { -- cgit v0.10.2 From 92b79f4322b8a2506bdd862f554a2a81ff0a2dad Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Fri, 12 Aug 2011 17:07:18 -0700 Subject: drm/i915: Cannot set clock gating under UMS The clock gating functions are only assigned under KMS, so don't try to call them under UMS. Signed-off-by: Keith Packard Tested-by: Justin P. Mattock diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 87677d6..f107423 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -871,7 +871,8 @@ int i915_restore_state(struct drm_device *dev) } mutex_unlock(&dev->struct_mutex); - intel_init_clock_gating(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) + intel_init_clock_gating(dev); if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); -- cgit v0.10.2 From 4853abaae7e4a2af938115ce9071ef8684fb7af4 Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Mon, 15 Aug 2011 21:37:25 +0200 Subject: block: fix flush machinery for stacking drivers with differring flush flags Commit ae1b1539622fb46e51b4d13b3f9e5f4c713f86ae, block: reimplement FLUSH/FUA to support merge, introduced a performance regression when running any sort of fsyncing workload using dm-multipath and certain storage (in our case, an HP EVA). The test I ran was fs_mark, and it dropped from ~800 files/sec on ext4 to ~100 files/sec. It turns out that dm-multipath always advertised flush+fua support, and passed commands on down the stack, where those flags used to get stripped off. The above commit changed that behavior: static inline struct request *__elv_next_request(struct request_queue *q) { struct request *rq; while (1) { - while (!list_empty(&q->queue_head)) { + if (!list_empty(&q->queue_head)) { rq = list_entry_rq(q->queue_head.next); - if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) || - (rq->cmd_flags & REQ_FLUSH_SEQ)) - return rq; - rq = blk_do_flush(q, rq); - if (rq) - return rq; + return rq; } Note that previously, a command would come in here, have REQ_FLUSH|REQ_FUA set, and then get handed off to blk_do_flush: struct request *blk_do_flush(struct request_queue *q, struct request *rq) { unsigned int fflags = q->flush_flags; /* may change, cache it */ bool has_flush = fflags & REQ_FLUSH, has_fua = fflags & REQ_FUA; bool do_preflush = has_flush && (rq->cmd_flags & REQ_FLUSH); bool do_postflush = has_flush && !has_fua && (rq->cmd_flags & REQ_FUA); unsigned skip = 0; ... if (blk_rq_sectors(rq) && !do_preflush && !do_postflush) { rq->cmd_flags &= ~REQ_FLUSH; if (!has_fua) rq->cmd_flags &= ~REQ_FUA; return rq; } So, the flush machinery was bypassed in such cases (q->flush_flags == 0 && rq->cmd_flags & (REQ_FLUSH|REQ_FUA)). Now, however, we don't get into the flush machinery at all. 
Instead, __elv_next_request just hands a request with flush and fua bits set to the scsi_request_fn, even if the underlying request_queue does not support flush or fua. The agreed upon approach is to fix the flush machinery to allow stacking. While this isn't used in practice (since there is only one request-based dm target, and that target will now reflect the flush flags of the underlying device), it does future-proof the solution, and make it function as designed. In order to make this work, I had to add a field to the struct request, inside the flush structure (to store the original req->end_io). Shaohua had suggested overloading the union with rb_node and completion_data, but the completion data is used by device mapper and can also be used by other drivers. So, I didn't see a way around the additional field. I tested this patch on an HP EVA with both ext4 and xfs, and it recovers the lost performance. Comments and other testers, as always, are appreciated. Cheers, Jeff Signed-off-by: Jeff Moyer Acked-by: Tejun Heo Signed-off-by: Jens Axboe diff --git a/block/blk-core.c b/block/blk-core.c index b850bed..7c59b0f 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1700,6 +1700,7 @@ EXPORT_SYMBOL_GPL(blk_rq_check_limits); int blk_insert_cloned_request(struct request_queue *q, struct request *rq) { unsigned long flags; + int where = ELEVATOR_INSERT_BACK; if (blk_rq_check_limits(q, rq)) return -EIO; @@ -1716,7 +1717,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) */ BUG_ON(blk_queued_rq(rq)); - add_acct_request(q, rq, ELEVATOR_INSERT_BACK); + if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) + where = ELEVATOR_INSERT_FLUSH; + + add_acct_request(q, rq, where); spin_unlock_irqrestore(q->queue_lock, flags); return 0; @@ -2273,7 +2277,7 @@ static bool blk_end_bidi_request(struct request *rq, int error, * %false - we are done with this request * %true - still buffers pending for this request **/ -static bool __blk_end_bidi_request(struct request *rq, int error, +bool __blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, unsigned int bidi_bytes) { if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) diff --git a/block/blk-flush.c b/block/blk-flush.c index 2d162bd..491eb30 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -123,7 +123,7 @@ static void blk_flush_restore_request(struct request *rq) /* make @rq a normal request */ rq->cmd_flags &= ~REQ_FLUSH_SEQ; - rq->end_io = NULL; + rq->end_io = rq->flush.saved_end_io; } /** @@ -301,9 +301,6 @@ void blk_insert_flush(struct request *rq) unsigned int fflags = q->flush_flags; /* may change, cache */ unsigned int policy = blk_flush_policy(fflags, rq); - BUG_ON(rq->end_io); - BUG_ON(!rq->bio || rq->bio != rq->biotail); - /* * @policy now records what operations need to be done. Adjust * REQ_FLUSH and FUA for the driver. @@ -313,6 +310,19 @@ void blk_insert_flush(struct request *rq) rq->cmd_flags &= ~REQ_FUA; /* + * An empty flush handed down from a stacking driver may + * translate into nothing if the underlying device does not + * advertise a write-back cache. In this case, simply + * complete the request. + */ + if (!policy) { + __blk_end_bidi_request(rq, 0, 0, 0); + return; + } + + BUG_ON(!rq->bio || rq->bio != rq->biotail); + + /* * If there's data but flush is not necessary, the request can be * processed directly without going through flush machinery. Queue * for normal execution. 
@@ -320,6 +330,7 @@ void blk_insert_flush(struct request *rq) if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { list_add_tail(&rq->queuelist, &q->queue_head); + blk_run_queue_async(q); return; } @@ -330,6 +341,7 @@ void blk_insert_flush(struct request *rq) memset(&rq->flush, 0, sizeof(rq->flush)); INIT_LIST_HEAD(&rq->flush.list); rq->cmd_flags |= REQ_FLUSH_SEQ; + rq->flush.saved_end_io = rq->end_io; /* Usually NULL */ rq->end_io = flush_data_end_io; blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0); diff --git a/block/blk.h b/block/blk.h index d658628..20b900a 100644 --- a/block/blk.h +++ b/block/blk.h @@ -17,6 +17,8 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq, struct bio *bio); void blk_dequeue_request(struct request *rq); void __blk_queue_free_tags(struct request_queue *q); +bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes); void blk_rq_timed_out_timer(unsigned long data); void blk_delete_timer(struct request *); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8479285..84b15d5 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -118,6 +118,7 @@ struct request { struct { unsigned int seq; struct list_head list; + rq_end_io_fn *saved_end_io; } flush; }; -- cgit v0.10.2 From a0fba3eb059e73fed2d376a901f8117734c12f1f Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Mon, 15 Aug 2011 10:10:31 +0000 Subject: sparc64: remove unnecessary macros from spinlock_64.h The sparc64 spinlock_64.h contains a number of operations defined first as static inline functions, and then as macros with the same names and parameters as the functions. Maybe this was needed at some point in the past, but now nothing seems to depend on these macros (checked with a recursive grep looking for ifdefs on these names). Other archs don't define these identity-macros. So this patch deletes these unnecessary macros. Compile-tested with sparc64_defconfig. Signed-off-by: Mikael Pettersson Signed-off-by: David S. Miller diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 073936a..9689176 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -210,14 +210,8 @@ static int inline arch_write_trylock(arch_rwlock_t *lock) return result; } -#define arch_read_lock(p) arch_read_lock(p) #define arch_read_lock_flags(p, f) arch_read_lock(p) -#define arch_read_trylock(p) arch_read_trylock(p) -#define arch_read_unlock(p) arch_read_unlock(p) -#define arch_write_lock(p) arch_write_lock(p) #define arch_write_lock_flags(p, f) arch_write_lock(p) -#define arch_write_unlock(p) arch_write_unlock(p) -#define arch_write_trylock(p) arch_write_trylock(p) #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) #define arch_write_can_lock(rw) (!(rw)->lock) -- cgit v0.10.2 From 3f6aa0b113846a8628baa649af422cfc6fb1d786 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Mon, 15 Aug 2011 10:11:50 +0000 Subject: sparc32: unbreak arch_write_unlock() The sparc32 version of arch_write_unlock() is just a plain assignment. Unfortunately this allows the compiler to schedule side-effects in a protected region to occur after the HW-level unlock, which is broken. E.g., the following trivial test case gets miscompiled: #include <linux/spinlock.h> rwlock_t lock; int counter; void foo(void) { write_lock(&lock); ++counter; write_unlock(&lock); } Fixed by adding a compiler memory barrier to arch_write_unlock().
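As a minimal sketch of the failure mode (illustrative only; the plain lock word below stands in for the real arch_rwlock_t, and foo_sketch is an invented name):

static int counter;
static volatile unsigned int lock_word;

static void foo_sketch(void)
{
	lock_word = ~0U;                          /* "locked" */
	++counter;                                /* protected update */
	__asm__ __volatile__("" : : : "memory");  /* compiler barrier */
	lock_word = 0;                            /* unlock */
}

Without the barrier, nothing stops the compiler from sinking the non-volatile ++counter below the volatile store that releases the lock; the empty asm with a "memory" clobber forces the increment to be emitted first.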
The sparc64 version combines the barrier and assignment into a single asm(), and implements the operation as a static inline, so that's what I did too. Compile-tested with sparc32_defconfig + CONFIG_SMP=y. Signed-off-by: Mikael Pettersson Signed-off-by: David S. Miller diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 5f5b8bf..bcc98fc 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -131,6 +131,15 @@ static inline void arch_write_lock(arch_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } +static void inline arch_write_unlock(arch_rwlock_t *lock) +{ + __asm__ __volatile__( +" st %%g0, [%0]" + : /* no outputs */ + : "r" (lock) + : "memory"); +} + static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -175,8 +184,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw) res; \ }) -#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) - #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) -- cgit v0.10.2 From 178a29600340bef5b13cd4157053679debe35351 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 15 Aug 2011 14:45:17 -0700 Subject: sparc64: Set HAVE_C_RECORDMCOUNT Signed-off-by: David S. Miller diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 42c67be..1a6f20d 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -55,6 +55,7 @@ config SPARC64 select PERF_USE_VMALLOC select IRQ_PREFLOW_FASTEOI select ARCH_HAVE_NMI_SAFE_CMPXCHG + select HAVE_C_RECORDMCOUNT config ARCH_DEFCONFIG string -- cgit v0.10.2 From cedf03bd9aa54d1d7a9065dddc9e76505f476b12 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 15 Aug 2011 10:18:46 -0700 Subject: x86: fix mm/fault.c build arch/x86/mm/fault.c needs to include asm/vsyscall.h to fix a build error: arch/x86/mm/fault.c: In function '__bad_area_nosemaphore': arch/x86/mm/fault.c:728: error: 'VSYSCALL_START' undeclared (first use in this function) Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 247aae3..0d17c8c 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -17,6 +17,7 @@ #include <asm/traps.h> /* dotraplinkage, ... */ #include <asm/pgalloc.h> /* pgd_*(), ... */ #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */ +#include <asm/vsyscall.h> /* * Page fault error code bits: -- cgit v0.10.2 From b5ddbf465f3675b19c8f5528b4064cbf278a5c6f Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 16 Aug 2011 09:36:06 +1000 Subject: regmap: using module facilities requires module.h Commit b33f9cbd67ba ("regmap: Specify a module license") added a MODULE_LICENSE to this file without adding an include of module.h. module.h should have been included anyway, since this file has EXPORT_SYMBOLs as well. With the pending module.h split up, this would probably have caused build problems.
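For context, a rough sketch of why such a file needs the header explicitly (the exported symbol name here is invented for illustration, not taken from the driver):

#include <linux/module.h>   /* EXPORT_SYMBOL(), MODULE_LICENSE() */

int example_regmap_helper(void)   /* hypothetical exported helper */
{
	return 0;
}
EXPORT_SYMBOL(example_regmap_helper);

MODULE_LICENSE("GPL");

Both macros expand to definitions that only exist once module.h is pulled in, which is why relying on an indirect include was fragile once the module.h split landed.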
Cc: Stephen Warren Cc: Mark Brown Signed-off-by: Stephen Rothwell Signed-off-by: Linus Torvalds diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index 2bbc659..f839694 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -13,6 +13,7 @@ #include <linux/regmap.h> #include <linux/spi/spi.h> #include <linux/init.h> +#include <linux/module.h> static int regmap_spi_write(struct device *dev, const void *data, size_t count) { -- cgit v0.10.2 From 18adad1c57f820d38d05e3d5e3d548e286233b76 Mon Sep 17 00:00:00 2001 From: Gerard Braad Date: Tue, 16 Aug 2011 00:17:56 -0700 Subject: Input: wacom - add support for the Wacom Bamboo Pen (CTL-660/K) Signed-off-by: Gerard Braad Reviewed-by: Chris Bagwell Signed-off-by: Ping Cheng Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 03ebcc8..c1c2f7b 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1460,6 +1460,9 @@ static const struct wacom_features wacom_features_0xD3 = static const struct wacom_features wacom_features_0xD4 = { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0xD5 = + { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, + 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD6 = { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1564,6 +1567,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xD2) }, { USB_DEVICE_WACOM(0xD3) }, { USB_DEVICE_WACOM(0xD4) }, + { USB_DEVICE_WACOM(0xD5) }, { USB_DEVICE_WACOM(0xD6) }, { USB_DEVICE_WACOM(0xD7) }, { USB_DEVICE_WACOM(0xD8) }, -- cgit v0.10.2 From a417ea4432db7fd1c91c19b129a3e3d2367b7ce4 Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Tue, 16 Aug 2011 00:17:56 -0700 Subject: Input: wacom - add WAC_MSG_RETRIES define Use the WAC_MSG_RETRIES define instead of a numeric constant. Signed-off-by: Ping Cheng Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 449c0a4..9879c73 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -49,6 +49,7 @@ struct hid_descriptor { #define USB_REQ_GET_REPORT 0x01 #define USB_REQ_SET_REPORT 0x09 #define WAC_HID_FEATURE_REPORT 0x03 +#define WAC_MSG_RETRIES 5 static int usb_get_report(struct usb_interface *intf, unsigned char type, unsigned char id, void *buf, int size) @@ -165,7 +166,7 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi report, hid_desc->wDescriptorLength, 5000); /* 5 secs */ - } while (result < 0 && limit++ < 5); + } while (result < 0 && limit++ < WAC_MSG_RETRIES); /* No need to parse the Descriptor.
It isn't an error though */ if (result < 0) @@ -336,7 +337,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat error = usb_get_report(intf, WAC_HID_FEATURE_REPORT, report_id, rep_data, 3); - } while ((error < 0 || rep_data[1] != 4) && limit++ < 5); + } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES); } else if (features->type != TABLETPC) { do { rep_data[0] = 2; @@ -347,7 +348,7 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat error = usb_get_report(intf, WAC_HID_FEATURE_REPORT, report_id, rep_data, 2); - } while ((error < 0 || rep_data[1] != 2) && limit++ < WAC_MSG_RETRIES); } kfree(rep_data); -- cgit v0.10.2 From 3b48c91cdf2d6827ce315b3b112310fa02198db0 Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Tue, 16 Aug 2011 00:17:57 -0700 Subject: Input: wacom - report id 3 returns 4 bytes of data Signed-off-by: Ping Cheng Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 9879c73..d27c9d9 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -320,23 +320,25 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat int limit = 0, report_id = 2; int error = -ENOMEM; - rep_data = kmalloc(2, GFP_KERNEL); + rep_data = kmalloc(4, GFP_KERNEL); if (!rep_data) return error; - /* ask to report tablet data if it is 2FGT Tablet PC or + /* ask to report tablet data if it is MT Tablet PC or * not a Tablet PC */ if (features->type == TABLETPC2FG) { do { rep_data[0] = 3; rep_data[1] = 4; + rep_data[2] = 0; + rep_data[3] = 0; report_id = 3; error = usb_set_report(intf, WAC_HID_FEATURE_REPORT, - report_id, rep_data, 2); + report_id, rep_data, 4); if (error >= 0) error = usb_get_report(intf, WAC_HID_FEATURE_REPORT, report_id, - rep_data, 3); + rep_data, 4); } while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES); } else if (features->type != TABLETPC) { do { -- cgit v0.10.2 From c503ad466da44ca23c658986629bf7a2e2eabbb7 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Tue, 16 Aug 2011 14:23:20 +0200 Subject: ALSA: hda - Fix duplicated capture-volume creation for ALC268 models Fix the duplicated creation of capture-mixer elements for some static ALC268 configurations. The capture mixers must be put in the cap_mixer field instead of the mixers array.
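Schematically, the fix moves every capture mixer out of the generic array and into the dedicated field (the preset and mixer names below are placeholders for illustration, not the real tables):

static const struct alc_config_preset example_preset = {
	.mixers    = { example_playback_mixer, example_beep_mixer },
	.cap_mixer = example_capture_mixer,   /* built exactly once */
};

With the capture controls in .cap_mixer, the driver's capture-mixer setup path creates them a single time instead of once more for every matching entry left in .mixers.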
Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/alc268_quirks.c b/sound/pci/hda/alc268_quirks.c index be58bf2..2e5876c 100644 --- a/sound/pci/hda/alc268_quirks.c +++ b/sound/pci/hda/alc268_quirks.c @@ -476,8 +476,8 @@ static const struct snd_pci_quirk alc268_ssid_cfg_tbl[] = { static const struct alc_config_preset alc268_presets[] = { [ALC267_QUANTA_IL1] = { - .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer, - alc268_capture_nosrc_mixer }, + .mixers = { alc267_quanta_il1_mixer, alc268_beep_mixer }, + .cap_mixer = alc268_capture_nosrc_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc267_quanta_il1_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -492,8 +492,8 @@ static const struct alc_config_preset alc268_presets[] = { .init_hook = alc_inithook, }, [ALC268_3ST] = { - .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, - alc268_beep_mixer }, + .mixers = { alc268_base_mixer, alc268_beep_mixer }, + .cap_mixer = alc268_capture_alt_mixer, .init_verbs = { alc268_base_init_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), .dac_nids = alc268_dac_nids, @@ -507,8 +507,8 @@ static const struct alc_config_preset alc268_presets[] = { .input_mux = &alc268_capture_source, }, [ALC268_TOSHIBA] = { - .mixers = { alc268_toshiba_mixer, alc268_capture_alt_mixer, - alc268_beep_mixer }, + .mixers = { alc268_toshiba_mixer, alc268_beep_mixer }, + .cap_mixer = alc268_capture_alt_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_toshiba_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -525,8 +525,8 @@ static const struct alc_config_preset alc268_presets[] = { .init_hook = alc_inithook, }, [ALC268_ACER] = { - .mixers = { alc268_acer_mixer, alc268_capture_alt_mixer, - alc268_beep_mixer }, + .mixers = { alc268_acer_mixer, alc268_beep_mixer }, + .cap_mixer = alc268_capture_alt_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_acer_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -543,8 +543,8 @@ static const struct alc_config_preset alc268_presets[] = { .init_hook = alc_inithook, }, [ALC268_ACER_DMIC] = { - .mixers = { alc268_acer_dmic_mixer, alc268_capture_alt_mixer, - alc268_beep_mixer }, + .mixers = { alc268_acer_dmic_mixer, alc268_beep_mixer }, + .cap_mixer = alc268_capture_alt_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_acer_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -561,9 +561,8 @@ static const struct alc_config_preset alc268_presets[] = { .init_hook = alc_inithook, }, [ALC268_ACER_ASPIRE_ONE] = { - .mixers = { alc268_acer_aspire_one_mixer, - alc268_beep_mixer, - alc268_capture_nosrc_mixer }, + .mixers = { alc268_acer_aspire_one_mixer, alc268_beep_mixer}, + .cap_mixer = alc268_capture_nosrc_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_acer_aspire_one_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -579,8 +578,8 @@ static const struct alc_config_preset alc268_presets[] = { .init_hook = alc_inithook, }, [ALC268_DELL] = { - .mixers = { alc268_dell_mixer, alc268_beep_mixer, - alc268_capture_nosrc_mixer }, + .mixers = { alc268_dell_mixer, alc268_beep_mixer}, + .cap_mixer = alc268_capture_nosrc_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_dell_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -596,8 +595,8 @@ static const struct alc_config_preset alc268_presets[] = { .init_hook = alc_inithook, }, [ALC268_ZEPTO] = { - .mixers = { alc268_base_mixer, alc268_capture_alt_mixer, - alc268_beep_mixer }, + .mixers = { 
alc268_base_mixer, alc268_beep_mixer }, + .cap_mixer = alc268_capture_alt_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_toshiba_verbs }, .num_dacs = ARRAY_SIZE(alc268_dac_nids), @@ -616,7 +615,8 @@ }, #ifdef CONFIG_SND_DEBUG [ALC268_TEST] = { - .mixers = { alc268_test_mixer, alc268_capture_mixer }, + .mixers = { alc268_test_mixer }, + .cap_mixer = alc268_capture_mixer, .init_verbs = { alc268_base_init_verbs, alc268_eapd_verbs, alc268_volume_init_verbs, alc268_beep_init_verbs }, -- cgit v0.10.2 From fa71f447065f676157ba6a2c121ba419818fc559 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 8 Aug 2011 11:50:24 -0400 Subject: cifs: demote cERROR in build_path_from_dentry to cFYI Running the cthon tests on a recent kernel caused this message to pop occasionally: CIFS VFS: did not end path lookup where expected namelen is 0 Some added debugging showed that namelen and dfsplen were both 0 when this occurred. That means that the read_seqretry returned true. Assuming that the comment inside the if statement is true, this should be harmless and just means that we raced with a rename. If that is the case, then there's no need for alarm and we can demote this to cFYI. While we're at it, print the dfsplen too so that we can see what happened here if the message pops during debugging. Cc: stable@kernel.org Cc: Al Viro Signed-off-by: Jeff Layton Signed-off-by: Steve French diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ae576fb..72d448b 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -105,8 +105,8 @@ cifs_bp_rename_retry: } rcu_read_unlock(); if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { - cERROR(1, "did not end path lookup where expected namelen is %d", - namelen); + cFYI(1, "did not end path lookup where expected. namelen=%d " + "dfsplen=%d", namelen, dfsplen); /* presumably this is only possible if racing with a rename of one of the parent directories (we can not lock the dentries above us to prevent this, but retrying should be harmless) */ -- cgit v0.10.2 From c3585aa91a25264234c8bd27a4a6823d4e544c2a Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Tue, 16 Aug 2011 14:18:48 +0100 Subject: gma500: kill MIPI interface types Kirill Shutemov found problems with the non-upstream IMG driver where the use of extra DRM encoder/connector types caused random crashes when the DRM layer tried to display their matching name. This removes the MIPI types matching the changes Pauli Nieminen made to the non upstream driver set. As Pauli points out: " MIPI (or DSI) is protocol specification on top of LVDS serial bus. That makes it reasonable to call MIPI connectors and encoders LVDS."
(and indeed they may also be HDMI convertors or similar when we want to report a more useful to end user result) Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c index 02e17c9..fd211f3 100644 --- a/drivers/staging/gma500/mdfld_dsi_dbi.c +++ b/drivers/staging/gma500/mdfld_dsi_dbi.c @@ -711,10 +711,11 @@ struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev, /* Create drm encoder object */ connector = &dsi_connector->base.base; encoder = &dbi_output->base.base; + /* Review this if we ever get MIPI-HDMI bridges or similar */ drm_encoder_init(dev, encoder, p_funcs->encoder_funcs, - DRM_MODE_ENCODER_MIPI); + DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); /* Attach to given connector */ diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h index dc6242c..f0fa986 100644 --- a/drivers/staging/gma500/mdfld_dsi_dbi.h +++ b/drivers/staging/gma500/mdfld_dsi_dbi.h @@ -42,9 +42,6 @@ #include "mdfld_dsi_output.h" #include "mdfld_output.h" -#define DRM_MODE_ENCODER_MIPI 5 - - /* * DBI encoder which inherits from mdfld_dsi_encoder */ diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c index 6e03a91..e685f12 100644 --- a/drivers/staging/gma500/mdfld_dsi_dpi.c +++ b/drivers/staging/gma500/mdfld_dsi_dpi.c @@ -777,10 +777,15 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev, /* Create drm encoder object */ connector = &dsi_connector->base.base; encoder = &dpi_output->base.base; + /* + * On existing hardware this will be a panel of some form, + * if future devices also have HDMI bridges this will need + * revisiting + */ drm_encoder_init(dev, encoder, p_funcs->encoder_funcs, - DRM_MODE_ENCODER_MIPI); + DRM_MODE_ENCODER_LVDS); drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs); diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c index 7536095..9050c0f 100644 --- a/drivers/staging/gma500/mdfld_dsi_output.c +++ b/drivers/staging/gma500/mdfld_dsi_output.c @@ -955,7 +955,9 @@ void mdfld_dsi_output_init(struct drm_device *dev, psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2; connector = &psb_output->base; - drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, DRM_MODE_CONNECTOR_MIPI); + /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */ + drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs, + DRM_MODE_CONNECTOR_LVDS); drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h index 38165e8..09e9687 100644 --- a/drivers/staging/gma500/medfield.h +++ b/drivers/staging/gma500/medfield.h @@ -21,8 +21,6 @@ * DEALINGS IN THE SOFTWARE. 
*/ -#define DRM_MODE_ENCODER_MIPI 5 - /* Medfield DSI controller registers */ #define MIPIA_DEVICE_READY_REG 0xb000 diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/staging/gma500/psb_drv.h index 72f487a..fd4732d 100644 --- a/drivers/staging/gma500/psb_drv.h +++ b/drivers/staging/gma500/psb_drv.h @@ -35,7 +35,6 @@ /* Append new drm mode definition here, align with libdrm definition */ #define DRM_MODE_SCALE_NO_SCALE 2 -#define DRM_MODE_CONNECTOR_MIPI 15 enum { CHIP_PSB_8108 = 0, /* Poulsbo */ -- cgit v0.10.2 From 4fec0e0bde09095b6349dc6206dbf19cebcd0a7e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 15 Aug 2011 21:41:43 -0700 Subject: xen: self-balloon needs module.h Fix build errors (found when CONFIG_SYSFS is not enabled): drivers/xen/xen-selfballoon.c:446: warning: data definition has no type or storage class drivers/xen/xen-selfballoon.c:446: warning: type defaults to 'int' in declaration of 'EXPORT_SYMBOL' drivers/xen/xen-selfballoon.c:446: warning: parameter names (without types) in function declaration drivers/xen/xen-selfballoon.c:485: error: expected declaration specifiers or '...' before string constant drivers/xen/xen-selfballoon.c:485: warning: data definition has no type or storage class drivers/xen/xen-selfballoon.c:485: warning: type defaults to 'int' in declaration of 'MODULE_LICENSE' drivers/xen/xen-selfballoon.c:485: warning: function declaration isn't a prototype Signed-off-by: Randy Dunlap Acked-by: Konrad Rzeszutek Wilk Signed-off-by: Linus Torvalds diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 1b4afd8..6ea852e 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c @@ -70,6 +70,7 @@ #include #include #include +#include <linux/module.h> #include #include #include -- cgit v0.10.2 From df3d8ae1f8780166a16dd7d08b4842a4d5b5f2b4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 2 Aug 2011 12:54:31 -0700 Subject: KVM: uses TASKSTATS, depends on NET CONFIG_TASKSTATS just had a change to use netlink, including a change to "depends on NET". Since "select" does not follow dependencies, KVM also needs to depend on NET to prevent build errors when CONFIG_NET is not enabled.
Sample of the reported "undefined reference" build errors: taskstats.c:(.text+0x8f686): undefined reference to `nla_put' taskstats.c:(.text+0x8f721): undefined reference to `nla_reserve' taskstats.c:(.text+0x8f8fb): undefined reference to `init_net' taskstats.c:(.text+0x8f905): undefined reference to `netlink_unicast' taskstats.c:(.text+0x8f934): undefined reference to `kfree_skb' taskstats.c:(.text+0x8f9e9): undefined reference to `skb_clone' taskstats.c:(.text+0x90060): undefined reference to `__alloc_skb' taskstats.c:(.text+0x901e9): undefined reference to `skb_put' taskstats.c:(.init.text+0x4665): undefined reference to `genl_register_family' taskstats.c:(.init.text+0x4699): undefined reference to `genl_register_ops' taskstats.c:(.init.text+0x4710): undefined reference to `genl_unregister_ops' taskstats.c:(.init.text+0x471c): undefined reference to `genl_unregister_family' Signed-off-by: Randy Dunlap Signed-off-by: Marcelo Tosatti diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 0a09b58..ff5790d 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -22,6 +22,8 @@ config KVM depends on HAVE_KVM # for device assignment: depends on PCI + # for TASKSTATS/TASK_DELAY_ACCT: + depends on NET select PREEMPT_NOTIFIERS select MMU_NOTIFIER select ANON_INODES -- cgit v0.10.2 From 22cfb0bf6721bb1f865f67bc21e3c36c272faf36 Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Tue, 16 Aug 2011 10:56:54 +0000 Subject: IPoIB: Fix possible NULL dereference in ipoib_start_xmit() Fix a bug introduced in 69cce1d14049 ("net: Abstract dst->neighbour accesses behind helpers.") where we might dereference skb_dst(skb) even if it is NULL, which causes: [ 240.944030] BUG: unable to handle kernel NULL pointer dereference at 0000000000000040 [ 240.948007] IP: [] ipoib_start_xmit+0x39/0x280 [ib_ipoib] [...] [ 240.948007] Call Trace: [ 240.948007] [ 240.948007] [] dev_hard_start_xmit+0x2a0/0x590 [ 240.948007] [] ? arp_create+0x70/0x200 [ 240.948007] [] sch_direct_xmit+0xef/0x1c0 Addresses: https://bugzilla.kernel.org/show_bug.cgi?id=41212 Signed-off-by: Bernd Schubert Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 43f89ba..fe89c46 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -717,11 +717,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_neigh *neigh; - struct neighbour *n; + struct neighbour *n = NULL; unsigned long flags; - n = dst_get_neighbour(skb_dst(skb)); - if (likely(skb_dst(skb) && n)) { + if (likely(skb_dst(skb))) + n = dst_get_neighbour(skb_dst(skb)); + + if (likely(n)) { if (unlikely(!*to_ipoib_neigh(n))) { ipoib_path_lookup(skb, dev); return NETDEV_TX_OK; -- cgit v0.10.2 From c2bceb3d7f145af5a0916bea700f2f9d380901ea Mon Sep 17 00:00:00 2001 From: Lionel Elie Mamane Date: Sat, 13 Aug 2011 14:04:38 +0000 Subject: sit tunnels: propagate IPv6 transport class to IPv4 Type of Service sit tunnels (IPv6 tunnel over IPv4) do not implement the "tos inherit" case to copy the IPv6 transport class byte from the inner packet to the IPv4 type of service byte in the outer packet. By contrast, ipip tunnels and GRE tunnels do. This patch, adapted from the similar code in net/ipv4/ipip.c and net/ipv4/ip_gre.c, implements that. This patch applies to 3.0.1, and has been tested on that version. Signed-off-by: Lionel Elie Mamane Signed-off-by: David S. 
Miller diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 07bf108..00b15ac 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -672,6 +672,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; + if (tos == 1) + tos = ipv6_get_dsfield(iph6); + /* ISATAP (RFC4214) - must come before 6to4 */ if (dev->priv_flags & IFF_ISATAP) { struct neighbour *neigh = NULL; -- cgit v0.10.2 From eb39d34004888afcc0a44d9c36383cd69fa3b3b9 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 26 Jul 2011 16:59:00 -0700 Subject: target: Change TCM_NON_EXISTENT_LUN response to ASC=LOGICAL UNIT NOT SUPPORTED This patch changes transport_send_check_condition_and_sense() for TCM_NON_EXISTENT_LUN emulation to use 0x25 (LOGICAL UNIT NOT SUPPORTED) instead of the original 0x20 (INVALID COMMAND OPERATION CODE). This is helpful to distinguish between TCM_UNSUPPORTED_SCSI_OPCODE ASC=0x20 exceptions. Signed-off-by: Nicholas A. Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 8976032..cc5a339 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4726,6 +4726,13 @@ int transport_send_check_condition_and_sense( */ switch (reason) { case TCM_NON_EXISTENT_LUN: + /* CURRENT ERROR */ + buffer[offset] = 0x70; + /* ILLEGAL REQUEST */ + buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; + /* LOGICAL UNIT NOT SUPPORTED */ + buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; + break; case TCM_UNSUPPORTED_SCSI_OPCODE: case TCM_SECTOR_COUNT_TOO_MANY: /* CURRENT ERROR */ -- cgit v0.10.2 From d5e2003c2bcda93a8f2e668eb4642d70c9c38301 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 4 Aug 2011 14:52:27 +0000 Subject: Btrfs: detect whether a device supports discard We have a problem where if a user specifies discard but the device doesn't actually support it we will return EOPNOTSUPP from btrfs_discard_extent. This is a problem because this gets called (in a fashion) from the tree log recovery code, which has a nice little BUG_ON(ret) after it, which causes us to fail the tree log replay. So instead detect whether our devices support discard when we're adding them and then don't issue discards if we know that the device doesn't support it. And just for good measure set ret = 0 in btrfs_issue_discard just in case we still get EOPNOTSUPP so we don't screw anybody up like this again. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 66bac22..059dfa0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1782,6 +1782,9 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, for (i = 0; i < multi->num_stripes; i++, stripe++) { + if (!stripe->dev->can_discard) + continue; + ret = btrfs_issue_discard(stripe->dev->bdev, stripe->physical, stripe->length); @@ -1789,11 +1792,16 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, discarded_bytes += stripe->length; else if (ret != -EOPNOTSUPP) break; + + /* + * Just in case we get back EOPNOTSUPP for some reason, + * just ignore the return value so we don't screw up + * people calling discard_extent.
+ */ + ret = 0; } kfree(multi); } - if (discarded_bytes && ret == -EOPNOTSUPP) - ret = 0; if (actual_bytes) *actual_bytes = discarded_bytes; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3c5f2fc..a595f87 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -517,6 +517,9 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) fs_devices->rw_devices--; } + if (device->can_discard) + fs_devices->num_can_discard--; + new_device = kmalloc(sizeof(*new_device), GFP_NOFS); BUG_ON(!new_device); memcpy(new_device, device, sizeof(*new_device)); @@ -525,6 +528,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) new_device->bdev = NULL; new_device->writeable = 0; new_device->in_fs_metadata = 0; + new_device->can_discard = 0; list_replace_rcu(&device->dev_list, &new_device->dev_list); call_rcu(&device->rcu, free_device); @@ -564,6 +568,7 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices) static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, fmode_t flags, void *holder) { + struct request_queue *q; struct block_device *bdev; struct list_head *head = &fs_devices->devices; struct btrfs_device *device; @@ -620,6 +625,12 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, seeding = 0; } + q = bdev_get_queue(bdev); + if (blk_queue_discard(q)) { + device->can_discard = 1; + fs_devices->num_can_discard++; + } + device->bdev = bdev; device->in_fs_metadata = 0; device->mode = flags; @@ -1560,6 +1571,7 @@ error: int btrfs_init_new_device(struct btrfs_root *root, char *device_path) { + struct request_queue *q; struct btrfs_trans_handle *trans; struct btrfs_device *device; struct block_device *bdev; @@ -1629,6 +1641,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) lock_chunks(root); + q = bdev_get_queue(bdev); + if (blk_queue_discard(q)) + device->can_discard = 1; device->writeable = 1; device->work.func = pending_bios_fn; generate_random_uuid(device->uuid); @@ -1664,6 +1679,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) root->fs_info->fs_devices->num_devices++; root->fs_info->fs_devices->open_devices++; root->fs_info->fs_devices->rw_devices++; + if (device->can_discard) + root->fs_info->fs_devices->num_can_discard++; root->fs_info->fs_devices->total_rw_bytes += device->total_bytes; if (!blk_queue_nonrot(bdev_get_queue(bdev))) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 7c12d61..6d866db 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -48,6 +48,7 @@ struct btrfs_device { int writeable; int in_fs_metadata; int missing; + int can_discard; spinlock_t io_lock; @@ -104,6 +105,7 @@ struct btrfs_fs_devices { u64 rw_devices; u64 missing_devices; u64 total_rw_bytes; + u64 num_can_discard; struct block_device *latest_bdev; /* all of the devices in the FS, protected by a mutex -- cgit v0.10.2 From 34f3e4f23ca3d259fe078f62a128d97ca83508ef Mon Sep 17 00:00:00 2001 From: liubo Date: Sat, 6 Aug 2011 08:35:23 +0000 Subject: Btrfs: fix an oops of log replay When btrfs recovers from a crash, it may hit the oops below: ------------[ cut here ]------------ kernel BUG at fs/btrfs/inode.c:4580! [...] RIP: 0010:[] [] btrfs_add_link+0x161/0x1c0 [btrfs] [...] Call Trace: [] ? btrfs_inode_ref_index+0x31/0x80 [btrfs] [] add_inode_ref+0x319/0x3f0 [btrfs] [] replay_one_buffer+0x2c7/0x390 [btrfs] [] walk_down_log_tree+0x32a/0x480 [btrfs] [] walk_log_tree+0xf5/0x240 [btrfs] [] btrfs_recover_log_trees+0x250/0x350 [btrfs] [] ? 
btrfs_recover_log_trees+0x350/0x350 [btrfs] [] open_ctree+0x1442/0x17d0 [btrfs] [...] This happens because, while replaying an inode ref item, we forget to check for old conflicting DIR_ITEM and DIR_INDEX items in the fs/file tree, so we run into conflicting corner cases that lead to the BUG_ON(). Signed-off-by: Liu Bo Tested-by: Andy Lutomirski Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index babee65..786639f 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -799,14 +799,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, struct extent_buffer *eb, int slot, struct btrfs_key *key) { - struct inode *dir; - int ret; struct btrfs_inode_ref *ref; + struct btrfs_dir_item *di; + struct inode *dir; struct inode *inode; - char *name; - int namelen; unsigned long ref_ptr; unsigned long ref_end; + char *name; + int namelen; + int ret; int search_done = 0; /* @@ -909,6 +910,25 @@ again: } btrfs_release_path(path); + /* look for a conflicting sequence number */ + di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), + btrfs_inode_ref_index(eb, ref), + name, namelen, 0); + if (di && !IS_ERR(di)) { + ret = drop_one_dir_item(trans, root, path, dir, di); + BUG_ON(ret); + } + btrfs_release_path(path); + + /* look for a conflicting name */ + di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), + name, namelen, 0); + if (di && !IS_ERR(di)) { + ret = drop_one_dir_item(trans, root, path, dir, di); + BUG_ON(ret); + } + btrfs_release_path(path); + insert: /* insert our name */ ret = btrfs_add_link(trans, dir, inode, name, namelen, 0, -- cgit v0.10.2 From 38c01b9605923cfdff5413e0a12e58ee8d962257 Mon Sep 17 00:00:00 2001 From: liubo Date: Tue, 2 Aug 2011 02:39:03 +0000 Subject: Btrfs: fix a bug of balance on full multi-disk partitions When balancing, we'll first try to shrink devices for some space, but if it is working on a full multi-disk partition with raid protection, we may encounter a bug, that is, while shrinking, total_bytes may be less than bytes_used, and btrfs may allocate a dev extent that accesses out of the device's bounds. Then we will not be able to write or read the data stored at the end of the device, and get the following: device fsid 0939f071-7ea3-46c8-95df-f176d773bfb6 devid 1 transid 10 /dev/sdb5 Btrfs detected SSD devices, enabling SSD mode btrfs: relocating block group 476315648 flags 9 btrfs: found 4 extents attempt to access beyond end of device sdb5: rw=145, want=546176, limit=546147 attempt to access beyond end of device sdb5: rw=145, want=546304, limit=546147 attempt to access beyond end of device sdb5: rw=145, want=546432, limit=546147 attempt to access beyond end of device sdb5: rw=145, want=546560, limit=546147 attempt to access beyond end of device Signed-off-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a595f87..46f9a20 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -863,6 +863,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, max_hole_start = search_start; max_hole_size = 0; + hole_size = 0; if (search_start >= search_end) { ret = -ENOSPC; @@ -945,7 +946,14 @@ next: cond_resched(); } - hole_size = search_end- search_start; + /* + * At this point, search_start should be the end of + * allocated dev extents, and when shrinking the device, + * search_end may be smaller than search_start.
+ */ + if (search_end > search_start) + hole_size = search_end - search_start; + if (hole_size > max_hole_size) { max_hole_start = search_start; max_hole_size = hole_size; } @@ -2447,9 +2455,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, total_avail = device->total_bytes - device->bytes_used; else total_avail = 0; - /* avail is off by max(alloc_start, 1MB), but that is the same - * for all devices, so it doesn't hurt the sorting later on - */ + + /* If there is no space on this device, skip it. */ + if (total_avail == 0) + continue; ret = find_free_dev_extent(trans, device, max_stripe_size * dev_stripes, -- cgit v0.10.2 From cdcb725c05fe0cb71777c66ddc2445fedbbb3c59 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 3 Aug 2011 10:15:25 +0000 Subject: Btrfs: check if there is enough space for balancing smarter When checking if there is enough space for balancing a block group, since we do not take raid types into consideration, we do not account for the correct amount of space that we need. This makes us do some extra work before we get ENOSPC. Signed-off-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 059dfa0..a3e71b5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6728,6 +6728,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) struct btrfs_space_info *space_info; struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; struct btrfs_device *device; + u64 min_free; + int index; + int dev_nr = 0; + int dev_min = 1; int full = 0; int ret = 0; @@ -6737,8 +6741,10 @@ if (!block_group) return -1; + min_free = btrfs_block_group_used(&block_group->item); + /* no bytes used, we're good */ - if (!btrfs_block_group_used(&block_group->item)) + if (!min_free) goto out; space_info = block_group->space_info; @@ -6754,10 +6760,9 @@ * all of the extents from this block group.
If we can, we're good */ if ((space_info->total_bytes != block_group->key.offset) && - (space_info->bytes_used + space_info->bytes_reserved + - space_info->bytes_pinned + space_info->bytes_readonly + - btrfs_block_group_used(&block_group->item) < - space_info->total_bytes)) { + (space_info->bytes_used + space_info->bytes_reserved + + space_info->bytes_pinned + space_info->bytes_readonly + + min_free < space_info->total_bytes)) { spin_unlock(&space_info->lock); goto out; } @@ -6774,9 +6779,29 @@ if (full) goto out; + /* + * index: + * 0: raid10 + * 1: raid1 + * 2: dup + * 3: raid0 + * 4: single + */ + index = get_block_group_index(block_group); + if (index == 0) { + dev_min = 4; + min_free /= 2; + } else if (index == 1) { + dev_min = 2; + } else if (index == 2) { + min_free *= 2; + } else if (index == 3) { + dev_min = fs_devices->rw_devices; + min_free /= dev_min; + } + mutex_lock(&root->fs_info->chunk_mutex); list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { - u64 min_free = btrfs_block_group_used(&block_group->item); u64 dev_offset; /* @@ -6787,7 +6812,11 @@ ret = find_free_dev_extent(NULL, device, min_free, &dev_offset, NULL); if (!ret) + dev_nr++; + + if (dev_nr >= dev_min) break; + ret = -1; } } -- cgit v0.10.2 From cb1b69f4508a1e8c1a7907379eafceb7ae0325ef Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Tue, 9 Aug 2011 07:11:13 +0000 Subject: Btrfs: forced readonly when btrfs_drop_snapshot() fails The filesystem is turned read-only instead of returning the error to the caller when an error is detected in btrfs_drop_snapshot(). And because the caller doesn't check the error, the function's return type is changed to 'void'. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a6263bd..8842936 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2367,8 +2367,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); -int btrfs_drop_snapshot(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, int update_ref); +void btrfs_drop_snapshot(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int update_ref); int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a3e71b5..80d6148 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6277,8 +6277,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans, * also make sure backrefs for the shared block and all lower level * blocks are properly updated.
*/ -int btrfs_drop_snapshot(struct btrfs_root *root, - struct btrfs_block_rsv *block_rsv, int update_ref) +void btrfs_drop_snapshot(struct btrfs_root *root, + struct btrfs_block_rsv *block_rsv, int update_ref) { struct btrfs_path *path; struct btrfs_trans_handle *trans; @@ -6291,13 +6291,16 @@ int level; path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; + if (!path) { + err = -ENOMEM; + goto out; + } wc = kzalloc(sizeof(*wc), GFP_NOFS); if (!wc) { btrfs_free_path(path); - return -ENOMEM; + err = -ENOMEM; + goto out; } trans = btrfs_start_transaction(tree_root, 0); @@ -6326,7 +6329,7 @@ path->lowest_level = 0; if (ret < 0) { err = ret; - goto out; + goto out_free; } WARN_ON(ret > 0); @@ -6433,11 +6436,14 @@ free_extent_buffer(root->commit_root); kfree(root); } -out: +out_free: btrfs_end_transaction_throttle(trans, tree_root); kfree(wc); btrfs_free_path(path); - return err; +out: + if (err) + btrfs_std_error(root->fs_info, err); + return; } /* -- cgit v0.10.2 From c97c2916e25c56e878e3e94efd449e2d688fcb31 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 3 Aug 2011 08:11:41 +0000 Subject: Btrfs: use plain page_address() in header fields setget functions We've stopped using highmem for extent buffers. Signed-off-by: Li Zefan Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8842936..8b99c79 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1415,17 +1415,15 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val); #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ static inline u##bits btrfs_##name(struct extent_buffer *eb) \ { \ - type *p = kmap_atomic(eb->first_page, KM_USER0); \ + type *p = page_address(eb->first_page); \ u##bits res = le##bits##_to_cpu(p->member); \ - kunmap_atomic(p, KM_USER0); \ return res; \ } \ static inline void btrfs_set_##name(struct extent_buffer *eb, \ u##bits val) \ { \ - type *p = kmap_atomic(eb->first_page, KM_USER0); \ + type *p = page_address(eb->first_page); \ p->member = cpu_to_le##bits(val); \ - kunmap_atomic(p, KM_USER0); \ } #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ -- cgit v0.10.2 From f4ac904c411b55e58bb240f332f93db2455f0010 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 5 Aug 2011 14:19:00 +0000 Subject: btrfs: memory leak in btrfs_add_inode_defrag() We don't use the defrag struct on this path. Signed-off-by: Dan Carpenter Signed-off-by: Chris Mason diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 010aec8..0705d15 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -150,6 +150,8 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, spin_lock(&root->fs_info->defrag_inodes_lock); if (!BTRFS_I(inode)->in_defrag) __btrfs_add_inode_defrag(inode, defrag); + else + kfree(defrag); spin_unlock(&root->fs_info->defrag_inodes_lock); return 0; } -- cgit v0.10.2 From bb3ac5a4dfc8eeb881206c77d9f925e320d9c41a Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 5 Aug 2011 09:32:35 +0000 Subject: Btrfs: fix wrong free space information Btrfs subtracted the size of the allocated space twice when it allocated the space from the bitmap in the cluster; this broke the free space accounting and eventually led to an oops. This patch also fixes a bug where ctl->free_space was subtracted without holding the lock.
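The shape of the fix, as a stand-alone sketch with invented names (the real code operates on struct btrfs_free_space_ctl under its tree lock):

#include <stdint.h>

struct ctl_sketch  { uint64_t free_space; };
struct info_sketch { uint64_t bytes; };

/* raw variant: for internal callers that account free_space themselves */
static void clear_bits_raw(struct info_sketch *info, uint64_t bytes)
{
	info->bytes -= bytes;   /* adjust only the bitmap entry */
}

/* public variant: raw clear plus exactly one free_space update */
static void clear_bits(struct ctl_sketch *ctl, struct info_sketch *info,
		       uint64_t bytes)
{
	clear_bits_raw(info, bytes);
	ctl->free_space -= bytes;   /* accounted once, under the lock */
}

The cluster allocation path calls the raw variant, so the shared counter is no longer decremented twice for the same range.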
Reported-by: Liu Bo Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 6377713..6a265b9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1168,9 +1168,9 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); } -static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, - struct btrfs_free_space *info, u64 offset, - u64 bytes) +static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info, + u64 offset, u64 bytes) { unsigned long start, count; @@ -1181,6 +1181,13 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, bitmap_clear(info->bitmap, start, count); info->bytes -= bytes; +} + +static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info, u64 offset, + u64 bytes) +{ + __bitmap_clear_bits(ctl, info, offset, bytes); ctl->free_space -= bytes; } @@ -1984,7 +1991,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, return 0; ret = search_start; - bitmap_clear_bits(ctl, entry, ret, bytes); + __bitmap_clear_bits(ctl, entry, ret, bytes); return ret; } @@ -2039,7 +2046,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, continue; } } else { - ret = entry->offset; entry->offset += bytes; -- cgit v0.10.2 From 0e588859618be54ec100373f1b86296271ce5307 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Fri, 5 Aug 2011 09:32:37 +0000 Subject: Btrfs: fix uninitialized sync_pending sync_pending is uninitialized before it be used, fix it. Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 46f9a20..f2a4cc7 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -142,7 +142,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device) unsigned long limit; unsigned long last_waited = 0; int force_reg = 0; - int sync_pending; + int sync_pending = 0; struct blk_plug plug; /* -- cgit v0.10.2 From f81c9cdc567cd3160ff9e64868d9a1a7ee226480 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 10 Aug 2011 18:04:04 +0000 Subject: Btrfs: truncate pages from clone ioctl target range We need to truncate page cache pages for the clone ioctl target range or else we'll confuse ourselves to no end. If the old data was cached, we used to still see it (until remount). If the page was partially updated we used to get a mix of old and new data. Signed-off-by: Sage Weil Signed-off-by: Chris Mason diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2bb0886..b3d249d 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2244,6 +2244,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, btrfs_wait_ordered_range(src, off, len); } + /* truncate page cache pages from target inode range */ + truncate_inode_pages_range(&inode->i_data, off, + ALIGN(off + len, PAGE_CACHE_SIZE) - 1); + /* clone data */ key.objectid = btrfs_ino(src); key.type = BTRFS_EXTENT_DATA_KEY; -- cgit v0.10.2 From c331eb580a0a7906c0cdb8dbae3cfe99e3c0e555 Mon Sep 17 00:00:00 2001 From: Andrew Drake Date: Tue, 16 Aug 2011 11:07:39 -0700 Subject: Input: bcm5974 - Add support for newer MacBookPro8,2 New MacBook Pro devices reporting product name MacBookPro8,2 come with newer/higher resolution touchpads than others with the same product name with USB ID 05ac:0252. This patch adds support for these devices. 
Signed-off-by: Andrew Drake Reviewed-by: Wanlong Gao Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 48d9ec1..da28018 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -71,6 +71,10 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c #define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d #define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e +/* Macbook8,2 (unibody) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253 +#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -112,6 +116,10 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING6_JIS), + /* MacbookPro8,2 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS), /* Terminating entry */ {} }; @@ -314,6 +322,18 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, + { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } + }, {} }; -- cgit v0.10.2 From 28ac293363368650963ee4c1e323c1ff502c121f Mon Sep 17 00:00:00 2001 From: Yufeng Shen Date: Tue, 16 Aug 2011 00:40:54 -0700 Subject: Input: atmel_mxt_ts - report pressure information from the driver The Atmel mxt1386 touch controller has touch pressure information, so let's report it to user space. [dtor@mail.ru: added ABS_PRESSURE reporting for ST emulation.]
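The reporting pattern, reduced to its core (a fragment for illustration, not the full handler; the 0..255 range mirrors what this patch advertises):

/* at probe time: declare the axes */
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 255, 0, 0);
input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 255, 0, 0);

/* per contact, in the message handler */
input_report_abs(input_dev, ABS_MT_PRESSURE, finger[id].pressure);
input_sync(input_dev);

input_sync() marks the end of one complete event frame, so user space sees the position and pressure updates for a contact atomically.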
Signed-off-by: Yufeng Shen Acked-by: Wanlong Gao Acked-by: Henrik Rydberg Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index ae00604..f5d6685 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -244,6 +244,7 @@ struct mxt_finger { int x; int y; int area; + int pressure; }; /* Each client has this additional data */ @@ -536,6 +537,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id) finger[id].x); input_report_abs(input_dev, ABS_MT_POSITION_Y, finger[id].y); + input_report_abs(input_dev, ABS_MT_PRESSURE, + finger[id].pressure); } else { finger[id].status = 0; } @@ -546,6 +549,8 @@ static void mxt_input_report(struct mxt_data *data, int single_id) if (status != MXT_RELEASE) { input_report_abs(input_dev, ABS_X, finger[single_id].x); input_report_abs(input_dev, ABS_Y, finger[single_id].y); + input_report_abs(input_dev, + ABS_PRESSURE, finger[single_id].pressure); } input_sync(input_dev); @@ -560,6 +565,7 @@ static void mxt_input_touchevent(struct mxt_data *data, int x; int y; int area; + int pressure; /* Check the touch is present on the screen */ if (!(status & MXT_DETECT)) { @@ -584,6 +590,7 @@ static void mxt_input_touchevent(struct mxt_data *data, y = y >> 2; area = message->message[4]; + pressure = message->message[5]; dev_dbg(dev, "[%d] %s x: %d, y: %d, area: %d\n", id, status & MXT_MOVE ? "moved" : "pressed", @@ -594,6 +601,7 @@ static void mxt_input_touchevent(struct mxt_data *data, finger[id].x = x; finger[id].y = y; finger[id].area = area; + finger[id].pressure = pressure; mxt_input_report(data, id); } @@ -1116,6 +1124,8 @@ static int __devinit mxt_probe(struct i2c_client *client, 0, data->max_x, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, data->max_y, 0, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, + 0, 255, 0, 0); /* For multi touch */ input_mt_init_slots(input_dev, MXT_MAX_FINGER); @@ -1125,6 +1135,8 @@ static int __devinit mxt_probe(struct i2c_client *client, 0, data->max_x, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, data->max_y, 0, 0); + input_set_abs_params(input_dev, ABS_MT_PRESSURE, + 0, 255, 0, 0); input_set_drvdata(input_dev, data); i2c_set_clientdata(client, data); -- cgit v0.10.2 From 8c320c079cde0286d71368961231e426539868b4 Mon Sep 17 00:00:00 2001 From: Jonas Aberg Date: Wed, 17 Aug 2011 19:10:06 +0900 Subject: fat: fix build warning This fixes a compile warning (uninitialized variable) in the fat filesystem code.
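For reference, the idiom used in the diff below boils down to a self-assignment that silences gcc's false positive without generating code (a sketch of the then-current definition in the kernel's compiler headers; treat the exact form as an assumption):

#define uninitialized_var(x) x = x

struct msdos_dir_entry *uninitialized_var(de);  /* i.e. *de = de */

The variable is genuinely assigned before use further down the function; the macro merely documents that and quiets the warning.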
Signed-off-by: Jonas Aberg Signed-off-by: Lee Jones Signed-off-by: Linus Walleij Signed-off-by: OGAWA Hirofumi diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 4ad6473..5efbd5d 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -1231,7 +1231,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots, struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ - struct msdos_dir_entry *de; + struct msdos_dir_entry *uninitialized_var(de); int err, free_slots, i, nr_bhs; loff_t pos, i_pos; -- cgit v0.10.2 From 186b53701ca5a843b07ca44a8d954dc6043c70f4 Mon Sep 17 00:00:00 2001 From: Mihai Moldovan Date: Wed, 17 Aug 2011 19:10:08 +0900 Subject: fat: fix utf8 iocharset warning message The fat_msg function already formats the given message and appends a newline to it - we don't need to do this in the passed message string as well, or we will end up with a blank line printed in the kernel log ring buffer. Also change the loglevel from error to warning. Signed-off-by: Mihai Moldovan Signed-off-by: OGAWA Hirofumi diff --git a/fs/fat/inode.c b/fs/fat/inode.c index cb8d839..52bcf58 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1186,9 +1186,9 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat, out: /* UTF-8 doesn't provide FAT semantics */ if (!strcmp(opts->iocharset, "utf8")) { - fat_msg(sb, KERN_ERR, "utf8 is not a recommended IO charset" + fat_msg(sb, KERN_WARNING, "utf8 is not a recommended IO charset" " for FAT filesystems, filesystem will be " - "case sensitive!\n"); + "case sensitive!"); } /* If user doesn't specify allow_utime, it's initialized from dmask. */ -- cgit v0.10.2 From 710d4403a45c4040a9aa86971d50958f5ae6ed40 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Wed, 17 Aug 2011 19:10:09 +0900 Subject: fat: fat16 support maximum 4GB file/vol size as WinXP or 7. FAT16 supports a maximum 4GB volume/file size with a 64KB cluster size. Windows NT/XP/7 increased the maximum cluster size to 64KB, which also raised the maximum file/volume size to 4GB. Despite this, the maximum file size of Linux FAT is still limited to 2GB. I found that it is limited by sb->s_maxbytes (0x7fffffff) when the partition is formatted as FAT16. sb->s_maxbytes in fill_super should be set to 0xffffffff, as for fat32.
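The arithmetic behind the limits, as a worked sketch (the 65,524-cluster ceiling is standard FAT16 background, not stated in this message):

#include <stdio.h>

int main(void)
{
	unsigned long long clusters = 65524ULL;  /* FAT16 maximum data clusters */
	unsigned long long cluster  = 65536ULL;  /* 64KB, the NT/XP/7 maximum */

	printf("volume limit:   %llu\n", clusters * cluster);  /* ~4GB */
	printf("old s_maxbytes: %lu\n", 0x7fffffffUL);         /* 2GB - 1 */
	printf("new s_maxbytes: %lu\n", 0xffffffffUL);         /* 4GB - 1 */
	return 0;
}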
Signed-off-by: Namjae Jeon Signed-off-by: OGAWA Hirofumi diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 52bcf58..017493b 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1365,6 +1365,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, sbi->free_clusters = -1; /* Don't know yet */ sbi->free_clus_valid = 0; sbi->prev_free = FAT_START_ENT; + sb->s_maxbytes = 0xffffffff; if (!sbi->fat_length && b->fat32_length) { struct fat_boot_fsinfo *fsinfo; @@ -1375,8 +1376,6 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, sbi->fat_length = le32_to_cpu(b->fat32_length); sbi->root_cluster = le32_to_cpu(b->root_cluster); - sb->s_maxbytes = 0xffffffff; - /* MC - if info_sector is 0, don't multiply by 0 */ sbi->fsinfo_sector = le16_to_cpu(b->info_sector); if (sbi->fsinfo_sector == 0) -- cgit v0.10.2 From ccbcdf7cf1b5f6c6db30d84095b9c6c53043af55 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 16 Aug 2011 15:07:41 +0100 Subject: xen/x86: replace order-based range checking of M2P table by linear one The order-based approach is not only less efficient (requiring a shift and a compare, typical generated code looking like this mov eax, [machine_to_phys_order] mov ecx, eax shr ebx, cl test ebx, ebx jnz ... whereas a direct check requires just a compare, like in cmp ebx, [machine_to_phys_nr] jae ... ), but also slightly dangerous in the 32-on-64 case - the element address calculation can wrap if the next power of two boundary is sufficiently far away from the actual upper limit of the table, and hence can result in user space addresses being accessed (with it being unknown what may actually be mapped there). Additionally, the elimination of the mistaken use of fls() here (should have been __fls()) fixes a latent issue on x86-64 that would trigger if the code was run on a system with memory extending beyond the 44-bit boundary. 
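Schematically, the two checks compare as follows (a sketch, not the kernel code; "order" is the power-of-two exponent, "nr" the exact entry count):

static int mfn_ok_order(unsigned long mfn, unsigned int order)
{
	return (mfn >> order) == 0;  /* shift + test; admits [nr, 1UL << order) */
}

static int mfn_ok_linear(unsigned long mfn, unsigned long nr)
{
	return mfn < nr;             /* one compare against the exact limit */
}

Because the order-based form rounds the table size up to a power of two, any mfn between the real limit and that rounded bound passes the check and indexes past the end of the table, which is exactly the 32-on-64 hazard described above.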
CC: stable@kernel.org Signed-off-by: Jan Beulich [v1: Based on Jeremy's feedback] Signed-off-by: Konrad Rzeszutek Wilk diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 64a619d..7ff4669 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h @@ -39,7 +39,7 @@ typedef struct xpaddr { ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) extern unsigned long *machine_to_phys_mapping; -extern unsigned int machine_to_phys_order; +extern unsigned long machine_to_phys_nr; extern unsigned long get_phys_to_machine(unsigned long pfn); extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); @@ -87,7 +87,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn) if (xen_feature(XENFEAT_auto_translated_physmap)) return mfn; - if (unlikely((mfn >> machine_to_phys_order) != 0)) { + if (unlikely(mfn >= machine_to_phys_nr)) { pfn = ~0; goto try_override; } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 974a528..b960429 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(xen_domain_type); unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; EXPORT_SYMBOL(machine_to_phys_mapping); -unsigned int machine_to_phys_order; -EXPORT_SYMBOL(machine_to_phys_order); +unsigned long machine_to_phys_nr; +EXPORT_SYMBOL(machine_to_phys_nr); struct start_info *xen_start_info; EXPORT_SYMBOL_GPL(xen_start_info); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f987bde..24abc1f 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1713,15 +1713,19 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) void __init xen_setup_machphys_mapping(void) { struct xen_machphys_mapping mapping; - unsigned long machine_to_phys_nr_ents; if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { machine_to_phys_mapping = (unsigned long *)mapping.v_start; - machine_to_phys_nr_ents = mapping.max_mfn + 1; + machine_to_phys_nr = mapping.max_mfn + 1; } else { - machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; + machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; } - machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); +#ifdef CONFIG_X86_32 + if ((machine_to_phys_mapping + machine_to_phys_nr) + < machine_to_phys_mapping) + machine_to_phys_nr = (unsigned long *)NULL + - machine_to_phys_mapping; +#endif } #ifdef CONFIG_X86_64 -- cgit v0.10.2 From 0ace64b85ea7b90e3bffe408b9d7c3364692bfa4 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 1 Aug 2011 21:12:09 +0000 Subject: IB/iser: Fix wrong mask when sizeof (dma_addr_t) > sizeof (unsigned long) The code that prepares the SG associated with a SCSI command for FMR was buggy on systems with DMA addresses that don't fit in unsigned long, e.g. under the 32-bit based XenServer dom0, where sizeof(dma_addr_t) is 8. Fix that by casting the masking constant used by the code to unsigned long long. This resolves a crash in iser_sg_to_page_vec on this system.
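A minimal sketch of the truncation being fixed, assuming a 32-bit unsigned long and a 64-bit dma_addr_t as on the dom0 mentioned above:

	#define SHIFT_4K 12
	dma_addr_t addr = 0x100000123ULL;
	/* (1UL << SHIFT_4K) is only 32 bits wide here, so the derived
	 * mask zero-extends and throws away the upper address bits: */
	dma_addr_t bad  = addr & ~((1UL  << SHIFT_4K) - 1); /* 0x0 */
	dma_addr_t good = addr & ~((1ULL << SHIFT_4K) - 1); /* 0x100000000 */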
Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 342cbc1..db6f3ce 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -89,7 +89,7 @@ } while (0) #define SHIFT_4K 12 -#define SIZE_4K (1UL << SHIFT_4K) +#define SIZE_4K (1ULL << SHIFT_4K) #define MASK_4K (~(SIZE_4K-1)) /* support up to 512KB in one RDMA */ -- cgit v0.10.2 From 200ae1a08bec8f3fedfcfe94c892d9a024db4e46 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 1 Aug 2011 21:14:09 +0000 Subject: IB/iser: Support iSCSI PDU padding RFC3270 mandates that iSCSI PDUs are padded to the closest integer number of four byte words. Fix the iser code to support that on both the TX/RX flows. Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 8db008d..9c61b9c 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -101,13 +101,17 @@ iscsi_iser_recv(struct iscsi_conn *conn, /* verify PDU length */ datalen = ntoh24(hdr->dlength); - if (datalen != rx_data_len) { - printk(KERN_ERR "iscsi_iser: datalen %d (hdr) != %d (IB) \n", - datalen, rx_data_len); + if (datalen > rx_data_len || (datalen + 4) < rx_data_len) { + iser_err("wrong datalen %d (hdr), %d (IB)\n", + datalen, rx_data_len); rc = ISCSI_ERR_DATALEN; goto error; } + if (datalen != rx_data_len) + iser_dbg("aligned datalen (%d) hdr, %d (IB)\n", + datalen, rx_data_len); + /* read AHS */ ahslen = hdr->hlength * 4; diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 5745b7f..f299de6 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -412,7 +412,7 @@ int iser_send_control(struct iscsi_conn *conn, memcpy(iser_conn->ib_conn->login_buf, task->data, task->data_count); tx_dsg->addr = iser_conn->ib_conn->login_dma; - tx_dsg->length = data_seg_len; + tx_dsg->length = task->data_count; tx_dsg->lkey = device->mr->lkey; mdesc->num_sge = 2; } -- cgit v0.10.2 From f991879473828f320a714e9494fb37a26ccd6b66 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 17 Aug 2011 13:45:09 +0100 Subject: mm: make HASHED_PAGE_VIRTUAL page_address' struct page argument const. Followup to 33dd4e0ec911 "mm: make some struct page's const" which missed the HASHED_PAGE_VIRTUAL case. 
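A hypothetical caller showing what the const-qualified prototype buys (dump_page_virt is made up for illustration):

	/* With the old page_address(struct page *), this would only
	 * compile with a cast that discards the caller's const: */
	static void dump_page_virt(const struct page *page)
	{
		printk(KERN_DEBUG "vaddr=%p\n", page_address(page));
	}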
Signed-off-by: Ian Campbell Cc: Andrea Arcangeli Cc: Rik van Riel Cc: Michel Lespinasse Cc: Mel Gorman Cc: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/include/linux/hash.h b/include/linux/hash.h index 06d25c1..b80506b 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -63,7 +63,7 @@ static inline u32 hash_32(u32 val, unsigned int bits) return hash >> (32 - bits); } -static inline unsigned long hash_ptr(void *ptr, unsigned int bits) +static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) { return hash_long((unsigned long)ptr, bits); } diff --git a/include/linux/mm.h b/include/linux/mm.h index fd599f4..c06454d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -737,7 +737,7 @@ static __always_inline void *lowmem_page_address(const struct page *page) #endif #if defined(HASHED_PAGE_VIRTUAL) -void *page_address(struct page *page); +void *page_address(const struct page *page); void set_page_address(struct page *page, void *virtual); void page_address_init(void); #endif diff --git a/mm/highmem.c b/mm/highmem.c index 693394d..5ef672c 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -326,7 +326,7 @@ static struct page_address_slot { spinlock_t lock; /* Protect this bucket's list */ } ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER]; -- cgit v0.10.2 From: Ian Campbell Date: Wed, 17 Aug 2011 17:40:33 +0100 Subject: mm: fix __page_to_pfn for a const struct page argument This allows the cast in lowmem_page_address (introduced as a warning fixup to 33dd4e0ec911 "mm: make some struct page's const") to be removed. Propagate const'ness to page_to_section() as well since it is required by __page_to_pfn. Signed-off-by: Ian Campbell Acked-by: Rik van Riel Cc: Andrea Arcangeli Cc: Michel Lespinasse Cc: Mel Gorman Cc: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index fb2d63f..aea9e45 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -39,7 +39,7 @@ }) #define __page_to_pfn(pg) \ -({ struct page *__pg = (pg); \ +({ const struct page *__pg = (pg); \ struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ (unsigned long)(__pg - __pgdat->node_mem_map) + \ __pgdat->node_start_pfn; \ @@ -57,7 +57,7 @@ * section[i].section_mem_map == mem_map's address - start_pfn; */ #define __page_to_pfn(pg) \ -({ struct page *__pg = (pg); \ +({ const struct page *__pg = (pg); \ int __sec = page_to_section(__pg); \ (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \ }) diff --git a/include/linux/mm.h b/include/linux/mm.h index c06454d..7438071 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -685,7 +685,7 @@ static inline void set_page_section(struct page *page, unsigned long section) page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; } -static inline unsigned long page_to_section(struct page *page) +static inline unsigned long page_to_section(const struct page *page) { return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; } @@ -720,7 +720,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone, static __always_inline void *lowmem_page_address(const struct page *page) { - return __va(PFN_PHYS(page_to_pfn((struct page *)page))); + return __va(PFN_PHYS(page_to_pfn(page))); } #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) -- cgit v0.10.2 From 338d0f0a6fbc82407864606f5b64b75aeb3c70f2 Mon Sep 17 00:00:00 2001 From: Timo Warns Date: Wed, 17 Aug 2011 17:59:56 +0200 Subject: befs: Validate length of long symbolic
links. Signed-off-by: Timo Warns Signed-off-by: Linus Torvalds diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 54b8c28..720d885 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -474,17 +474,22 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd) befs_data_stream *data = &befs_ino->i_data.ds; befs_off_t len = data->size; - befs_debug(sb, "Follow long symlink"); - - link = kmalloc(len, GFP_NOFS); - if (!link) { - link = ERR_PTR(-ENOMEM); - } else if (befs_read_lsymlink(sb, data, link, len) != len) { - kfree(link); - befs_error(sb, "Failed to read entire long symlink"); + if (len == 0) { + befs_error(sb, "Long symlink with illegal length"); link = ERR_PTR(-EIO); } else { - link[len - 1] = '\0'; + befs_debug(sb, "Follow long symlink"); + + link = kmalloc(len, GFP_NOFS); + if (!link) { + link = ERR_PTR(-ENOMEM); + } else if (befs_read_lsymlink(sb, data, link, len) != len) { + kfree(link); + befs_error(sb, "Failed to read entire long symlink"); + link = ERR_PTR(-EIO); + } else { + link[len - 1] = '\0'; + } } } else { link = befs_ino->i_data.symlink; -- cgit v0.10.2 From 8919bc13e8d92c5b082c5c0321567383a071f5bc Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Mon, 15 Aug 2011 05:25:40 +0000 Subject: net_sched: fix port mirror/redirect stats reporting When a redirected or mirrored packet is dropped by the target device we need to record statistics. Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 102fc21..e051398 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -196,8 +196,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; - dev_queue_xmit(skb2); - err = 0; + err = dev_queue_xmit(skb2); out: if (err) { -- cgit v0.10.2 From ba3211ccd043fae3713793334d64d75bd0a1d029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20Pan=28=E6=BD=98=E5=8D=AB=E5=B9=B3=29?= Date: Mon, 15 Aug 2011 15:57:35 +0000 Subject: bonding: reset backup and inactive flag of slave Eduard Sinelnikov (eduard.sinelnikov@gmail.com) found that if we change the bonding mode from active backup to round robin, some slaves still keep the "backup" flag and won't transmit packets. As Jay Vosburgh (fubar@us.ibm.com) pointed out, we can work around that by removing the bond_is_active_slave() check, because the "backup" flag is only meaningful in active backup mode. But if we simply ignore the bond_is_active_slave() check, transmission will work fine, yet we can't maintain the correct value of the "backup" flag for each slave, even though it is meaningless for modes other than active backup. I'd like to reset the "backup" and "inactive" flags in bond_open, so that we keep their values correct. As for bond_is_active_slave(), I'd like to prepare another patch to handle it. V2: Use C style comment. Move read_lock(&bond->curr_slave_lock). Replace "restore" with "reset": for active backup mode it means "restore", but for other modes it means "reset". Signed-off-by: Weiping Pan Reviewed-by: WANG Cong Signed-off-by: David S. 
Miller diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 38a83ac..43f2ea5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3419,9 +3419,27 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) static int bond_open(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave; + int i; bond->kill_timers = 0; + /* reset slave->backup and slave->inactive */ + read_lock(&bond->lock); + if (bond->slave_cnt > 0) { + read_lock(&bond->curr_slave_lock); + bond_for_each_slave(bond, slave, i) { + if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) + && (slave != bond->curr_active_slave)) { + bond_set_slave_inactive_flags(slave); + } else { + bond_set_slave_active_flags(slave); + } + } + read_unlock(&bond->curr_slave_lock); + } + read_unlock(&bond->lock); + INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed); if (bond_is_lb(bond)) { -- cgit v0.10.2 From 9a75a97296c43c34add7dca8275496186e1b4ae9 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 15 Aug 2011 22:33:34 +0000 Subject: via-velocity: remove non-tagged packet filtering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's undesired to filter untagged packets at any time. So simply remove this. Reported-by: Stephan Bärwolf Tested-by: Stephan Bärwolf Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index deb1eca..7c5336c 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -515,10 +515,6 @@ static void velocity_init_cam_filter(struct velocity_info *vptr) mac_set_cam_mask(regs, vptr->mCAMmask); /* Enable VCAMs */ - - if (test_bit(0, vptr->active_vlans)) - WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG); - for_each_set_bit(vid, vptr->active_vlans, VLAN_N_VID) { mac_set_vlan_cam(regs, i, (u8 *) &vid); vptr->vCAMmask[i / 8] |= 0x1 << (i % 8); -- cgit v0.10.2 From 9331db4f00cfee8a79d2147ac83723ef436b9759 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 17 Aug 2011 23:50:37 -0700 Subject: forcedeth: call vlan_mode only if hw supports vlans If the hw does not support vlans, don't call nv_vlan_mode because there is no point. I believe this should fix issues on older chips without vlan support (like Ingo has). Reported-by: Ingo Molnar Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index e55df30..6d5fbd4 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5615,7 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } - nv_vlan_mode(dev, dev->features); + if (id->driver_data & DEV_HAS_VLAN) + nv_vlan_mode(dev, dev->features); netif_carrier_off(dev); -- cgit v0.10.2 From 77e57297b4ff3f602ba5105398d342a4b4a54774 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 23 May 2011 14:39:17 +0200 Subject: perf list: Fix exit value This patch fixes an issue with the exit value of perf list: $ perf list; echo $? 129 perf list returns an error exit code even though there is no error. There was a stray exit(129) in print_events(). This patch removes this exit(). $ perf list; echo $?
0 $ perf list hw sw cpu-cycles OR cycles [Hardware event] stalled-cycles-frontend OR idle-cycles-frontend [Hardware event] stalled-cycles-backend OR idle-cycles-backend [Hardware event] instructions [Hardware event] cache-references [Hardware event] cache-misses [Hardware event] branch-instructions OR branches [Hardware event] branch-misses [Hardware event] bus-cycles [Hardware event] cpu-clock [Software event] task-clock [Software event] page-faults OR faults [Software event] minor-faults [Software event] major-faults [Software event] context-switches OR cs [Software event] cpu-migrations OR migrations [Software event] alignment-faults [Software event] emulation-faults [Software event] $ echo $? 0 Cc: Frederic Weisbecker Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110523123917.GA31060@quad Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 4ea7e19..d93f3ce 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1097,6 +1097,4 @@ void print_events(const char *event_glob) printf("\n"); print_tracepoint_events(NULL, NULL); - - exit(129); } -- cgit v0.10.2 From cc2d86b04d9ac28a6be6cb05da6ea8f014fd5aa0 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 7 Jun 2011 18:19:36 +0200 Subject: perf evlist: Fix missing event name init for default event When no event is given to perf record, perf top, a default event is initialized (cycles). However, perf_evlist__add_default() was not setting the symbolic name for the event. Perf top worked simply because it was reconstructing the name from the event code. But it should not have to do this. This patch initializes the evsel->name field properly. This second version improves the code flow on the non error path. Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110607161936.GA8163@quad Signed-off-by: Stephane Eranian [committer note: Use perf_evsel__delete() instead of plain free()] Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index e03e7bc..c12bd47 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -85,10 +85,19 @@ int perf_evlist__add_default(struct perf_evlist *evlist) struct perf_evsel *evsel = perf_evsel__new(&attr, 0); if (evsel == NULL) - return -ENOMEM; + goto error; + + /* use strdup() because free(evsel) assumes name is allocated */ + evsel->name = strdup("cycles"); + if (!evsel->name) + goto error_free; perf_evlist__add(evlist, evsel); return 0; +error_free: + perf_evsel__delete(evsel); +error: + return -ENOMEM; } void perf_evlist__disable(struct perf_evlist *evlist) -- cgit v0.10.2 From 777d1d71db622a5e1ff703495741c3d257b532e5 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Sat, 23 Jul 2011 04:10:43 +0200 Subject: perf tools: Fix error handling of unknown events There was a problem with the parse_events() code not printing the correct event name when an event was unknown and starting with an 'r'. The source of the problem was the way raw notation was parsed. Without the patch: $ perf stat -e retired_foo invalid event modifier: 'tired_foo' With the patch: $ perf stat -e retired_foo invalid or unsupported event: 'retired_foo' This also covers the case where the name of the event was not printed at all when perf was linked with libpfm4. 
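A short walkthrough of the failure mode, using the event name from the commit message:

	/* "retired_foo" starts with 'r', so it is first tried as a
	 * raw event: hex2u64("etired_foo", &config) consumes the
	 * leading "e" as a hex digit, the old code then advanced
	 * *strp past "re", and the leftover "tired_foo" was reported
	 * as a bad modifier.  Requiring '\0', ',' or ':' right after
	 * the hex digits makes the raw-event parse fail cleanly, so
	 * the full name is reported instead. */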
Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110723021043.GA20178@quad Signed-off-by: Stephane Eranian Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index d93f3ce..928918b 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -697,7 +697,11 @@ parse_raw_event(const char **strp, struct perf_event_attr *attr) return EVT_FAILED; n = hex2u64(str + 1, &config); if (n > 0) { - *strp = str + n + 1; + const char *end = str + n + 1; + if (*end != '\0' && *end != ',' && *end != ':') + return EVT_FAILED; + + *strp = end; attr->type = PERF_TYPE_RAW; attr->config = config; return EVT_HANDLED; -- cgit v0.10.2 From 195bcbf5078d74c8e00d68f04eb8695196fb31e8 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Thu, 18 Aug 2011 07:37:21 -0400 Subject: perf tools: Fix build against newer glibc Upstream glibc commit 295e904 added a definition for __attribute_const__ to cdefs.h. This causes the following error when building perf: util/include/linux/compiler.h:8:0: error: "__attribute_const__" redefined [-Werror] /usr/include/sys/cdefs.h:226:0: note: this is the location of the previous definition Wrap __attribute_const__ in #ifndef as we do for __always_inline. Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20110818113720.GL2227@zod.bos.redhat.com Signed-off-by: Josh Boyer Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h index 791f9dd..547628e 100644 --- a/tools/perf/util/include/linux/compiler.h +++ b/tools/perf/util/include/linux/compiler.h @@ -5,7 +5,9 @@ #define __always_inline inline #endif #define __user +#ifndef __attribute_const__ #define __attribute_const__ +#endif #define __used __attribute__((__unused__)) -- cgit v0.10.2 From d53e8365eaacfdb29253b39d186109f5b4fcc08d Mon Sep 17 00:00:00 2001 From: Geunsik Lim Date: Thu, 18 Aug 2011 16:44:57 +0900 Subject: MAINTAINERS: Fix list of perf events source files Recent changes made kernel/perf_event.c be split and moved to kernel/events/. Cc: Ingo Molnar Cc: Jiri Kosina Cc: Joe Perches Cc: Li Zefan Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1313653497-27263-1-git-send-email-leemgs1@gmail.com Signed-off-by: Geunsik Lim Signed-off-by: Arnaldo Carvalho de Melo diff --git a/MAINTAINERS b/MAINTAINERS index 1e55e1e..7110675 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4971,7 +4971,7 @@ M: Paul Mackerras M: Ingo Molnar M: Arnaldo Carvalho de Melo S: Supported -F: kernel/perf_event*.c +F: kernel/events/* F: include/linux/perf_event.h F: arch/*/kernel/perf_event*.c F: arch/*/kernel/*/perf_event*.c -- cgit v0.10.2 From 43bece79796c2a39ec98998fd3f1071f04f3d8c3 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Wed, 17 Aug 2011 18:42:07 +0800 Subject: perf tools: Add group event scheduling option to perf record/stat Group event scheduling command line option is missing in perf record/stat. Add it to perf record/stat, which is same as in perf top. 
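In terms of the code, the new flag simply replaces the hard-coded false group argument of the open helpers; a condensed sketch of the perf stat side of the diff below:

	/* with -g/--group, all counters in evsel_list are opened as
	 * one counter group and therefore scheduled together: */
	if (system_wide)
		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
						group);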
Reported-by: Andi Kleen Cc: Andi Kleen Cc: Ingo Molnar Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1313577727.2754.5.camel@hp6530s Signed-off-by: Lin Ming Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index f6426b4..6b0519f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -45,7 +45,7 @@ static int freq = 1000; static int output; static int pipe_output = 0; static const char *output_name = NULL; -static int group = 0; +static bool group = false; static int realtime_prio = 0; static bool nodelay = false; static bool raw_samples = false; @@ -753,6 +753,8 @@ const struct option record_options[] = { "child tasks do not inherit counters"), OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"), OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"), + OPT_BOOLEAN(0, "group", &group, + "put the counters into a counter group"), OPT_BOOLEAN('g', "call-graph", &call_graph, "do call-graph (stack chain/backtrace) recording"), OPT_INCR('v', "verbose", &verbose, diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 1ad04ce..5deb17d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -193,6 +193,7 @@ static int big_num_opt = -1; static const char *cpu_list; static const char *csv_sep = NULL; static bool csv_output = false; +static bool group = false; static volatile int done = 0; @@ -280,14 +281,14 @@ static int create_perf_stat_counter(struct perf_evsel *evsel) attr->inherit = !no_inherit; if (system_wide) - return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false); + return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group); if (target_pid == -1 && target_tid == -1) { attr->disabled = 1; attr->enable_on_exec = 1; } - return perf_evsel__open_per_thread(evsel, evsel_list->threads, false); + return perf_evsel__open_per_thread(evsel, evsel_list->threads, group); } /* @@ -1043,6 +1044,8 @@ static const struct option options[] = { "stat events on existing thread id"), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), + OPT_BOOLEAN('g', "group", &group, + "put the counters into a counter group"), OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"), OPT_INCR('v', "verbose", &verbose, -- cgit v0.10.2 From 3fe45aeaf2033c9eaa5028ed5ba68b466008876f Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 18 Aug 2011 15:13:17 +0200 Subject: ALSA: hda - Add "PCM" volume to vmaster slave list The new parser may use "PCM" volume, but it was missing the vmaster slave list, thus "Master" volume didn't control it. 
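For context, a rough sketch of how these name lists are consumed; the call below follows the usual snd_hda_add_vmaster() pattern, and the exact call site in patch_realtek.c may differ:

	/* vmaster slaves are matched purely by control name, so the
	 * "PCM" controls must be listed for "Master" to reach them: */
	err = snd_hda_add_vmaster(codec, "Master Playback Volume",
				  NULL, alc_slave_vols);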
Reference: https://bugzilla.kernel.org/show_bug.cgi?id=41342 Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 9a1aa09..fcb11af 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1784,6 +1784,7 @@ static const char * const alc_slave_vols[] = { "Speaker Playback Volume", "Mono Playback Volume", "Line-Out Playback Volume", + "PCM Playback Volume", NULL, }; @@ -1798,6 +1799,7 @@ static const char * const alc_slave_sws[] = { "Mono Playback Switch", "IEC958 Playback Switch", "Line-Out Playback Switch", + "PCM Playback Switch", NULL, }; -- cgit v0.10.2 From cb6db4e57632ba8589cc2f9fe1d0aa9116b87ab8 Mon Sep 17 00:00:00 2001 From: Jeff Mahoney Date: Mon, 15 Aug 2011 17:27:21 +0000 Subject: btrfs: btrfs_permission's RO check shouldn't apply to device nodes This patch tightens the read-only access checks in btrfs_permission to match the constraints in inode_permission. Currently, even though the device node itself will be unmodified, read-write access to device nodes is denied when the device node resides on a read-only subvolume or is a file that has been marked read-only by the btrfs conversion utility. With this patch applied, the check only affects regular files, directories, and symlinks. It also restructures the code a bit so that we don't duplicate the MAY_WRITE check for both tests. Signed-off-by: Jeff Mahoney Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 15fceef..0ccc743 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7354,11 +7354,15 @@ static int btrfs_set_page_dirty(struct page *page) static int btrfs_permission(struct inode *inode, int mask) { struct btrfs_root *root = BTRFS_I(inode)->root; + umode_t mode = inode->i_mode; - if (btrfs_root_readonly(root) && (mask & MAY_WRITE)) - return -EROFS; - if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) - return -EACCES; + if (mask & MAY_WRITE && + (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) { + if (btrfs_root_readonly(root)) + return -EROFS; + if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) + return -EACCES; + } return generic_permission(inode, mask); } -- cgit v0.10.2 From 9a4327ca1f45f82edad7dc0a4e52ce9316e0950c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 18 Aug 2011 10:16:05 -0400 Subject: btrfs: unlock on error in btrfs_file_llseek() There were some unlocks on error missing in a recent patch to btrfs_file_llseek(). Signed-off-by: Dan Carpenter Signed-off-by: Chris Mason diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 658d669..f7d9df7 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1804,10 +1804,14 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) } } - if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) - return -EINVAL; - if (offset > inode->i_sb->s_maxbytes) - return -EINVAL; + if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) { + ret = -EINVAL; + goto out; + } + if (offset > inode->i_sb->s_maxbytes) { + ret = -EINVAL; + goto out; + } /* Special lock needed here? */ if (offset != file->f_pos) { -- cgit v0.10.2 From f1e490a7ebe41e06324abbbcd86005b0af02a375 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 18 Aug 2011 10:36:39 -0400 Subject: Btrfs: set i_size properly when fallocating and we already have an extent. xfstests exposed a problem with preallocate when it fallocates a range that already has an extent. We don't set the new i_size properly because we see that we already have an extent.
This isn't right and we should update i_size if the space already exists. With this patch we now pass xfstests 075. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0705d15..15e5a1c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1631,11 +1631,15 @@ static long btrfs_fallocate(struct file *file, int mode, cur_offset = alloc_start; while (1) { + u64 actual_end; + em = btrfs_get_extent(inode, NULL, 0, cur_offset, alloc_end - cur_offset, 0); BUG_ON(IS_ERR_OR_NULL(em)); last_byte = min(extent_map_end(em), alloc_end); + actual_end = min_t(u64, extent_map_end(em), offset + len); last_byte = (last_byte + mask) & ~mask; + if (em->block_start == EXTENT_MAP_HOLE || (cur_offset >= inode->i_size && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { @@ -1648,6 +1652,16 @@ static long btrfs_fallocate(struct file *file, int mode, free_extent_map(em); break; } + } else if (actual_end > inode->i_size && + !(mode & FALLOC_FL_KEEP_SIZE)) { + /* + * We didn't need to allocate any more space, but we + * still extended the size of the file so we need to + * update i_size. + */ + inode->i_ctime = CURRENT_TIME; + i_size_write(inode, actual_end); + btrfs_ordered_update_i_size(inode, actual_end, NULL); } free_extent_map(em); -- cgit v0.10.2 From 13589c437daf4c8e429b3236c0b923de1c9420d8 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 18 Aug 2011 04:41:55 +0000 Subject: [CIFS] possible memory corruption on mount CIFS cleanup_volume_info_contents() looks like having a memory corruption problem. When UNCip is set to "&vol->UNC[2]" in cifs_parse_mount_options(), it should not be kfree()-ed in cleanup_volume_info_contents(). Introduced in commit b946845a9dc523c759cae2b6a0f6827486c3221a Signed-off-by: J.R. Okajima Reviewed-by: Jeff Layton CC: Stable Signed-off-by: Steve French diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 80c2e3a..633c246 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2878,7 +2878,8 @@ cleanup_volume_info_contents(struct smb_vol *volume_info) kfree(volume_info->username); kzfree(volume_info->password); kfree(volume_info->UNC); - kfree(volume_info->UNCip); + if (volume_info->UNCip != volume_info->UNC + 2) + kfree(volume_info->UNCip); kfree(volume_info->domainname); kfree(volume_info->iocharset); kfree(volume_info->prepath); -- cgit v0.10.2 From 04c05b4a68c0ab0d6bb41c710a646e56f62a70a3 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 18 Aug 2011 04:44:35 +0000 Subject: update cifs version to 1.75 Signed-off-by: Steve French diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index cb71dc1..95da802 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "1.74" +#define CIFS_VERSION "1.75" #endif /* _CIFSFS_H */ -- cgit v0.10.2 From 8cf2d2399ab60842f55598bc1b00fd15503b9950 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Thu, 18 Aug 2011 09:17:00 +0200 Subject: i7core_edac: fixed typo in error count calculation Based on a patch from the PaX Team, found during a clang analysis pass. 
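The operator mix-up, distilled (the status value is made up):

	u64 status = (u64)0x12345678 << 38;   /* hypothetical MCi_STATUS */
	u32 wrong = (status >> 38) && 0x7fff; /* logical AND: always 0 or 1 */
	u32 right = (status >> 38) &  0x7fff; /* 15-bit field: 0x5678 */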
Signed-off-by: Mathias Krause Acked-by: Mauro Carvalho Chehab Cc: PaX Team Cc: stable@kernel.org [v2.6.35+] Signed-off-by: Linus Torvalds diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 04f1e7c..f6cf448 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, char *type, *optype, *err, *msg; unsigned long error = m->status & 0x1ff0000l; u32 optypenum = (m->status >> 4) & 0x07; - u32 core_err_cnt = (m->status >> 38) && 0x7fff; + u32 core_err_cnt = (m->status >> 38) & 0x7fff; u32 dimm = (m->misc >> 16) & 0x3; u32 channel = (m->misc >> 18) & 0x3; u32 syndrome = m->misc >> 32; -- cgit v0.10.2 From d522a0d17963e9c2e556db2cbd60c96d40505b6c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 18 Aug 2011 12:19:27 -0700 Subject: irqdesc: fix new kernel-doc warning Fix kernel-doc warning in irqdesc.c: Warning(kernel/irq/irqdesc.c:353): No description found for parameter 'owner' Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index cb65d03..039b889 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -344,6 +344,7 @@ EXPORT_SYMBOL_GPL(irq_free_descs); * @from: Start the search from this irq number * @cnt: Number of consecutive irqs to allocate. * @node: Preferred node on which the irq descriptor should be allocated + * @owner: Owning module (can be NULL) * * Returns the first irq number or error code */ -- cgit v0.10.2 From ebd1699ec5f1a6f1f2df6b48fa54bc6ff790143c Mon Sep 17 00:00:00 2001 From: Jeff Garzik Date: Thu, 18 Aug 2011 23:52:36 -0400 Subject: [libata] sata_sil: fix used-uninit warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Init 'serror' to silence the following warning: drivers/ata/sata_sil.c: In function ‘sil_interrupt’: drivers/ata/sata_sil.c:453:14: warning: ‘serror’ may be used uninitialized in this function [-Wuninitialized] This is not a 'can never happen' but is nonetheless extremely unlikely. The easiest and cleanest warning fix is simply to init the var, rather than worry about marking the var uninit-ok. Signed-off-by: Jeff Garzik diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c index 98c1d78..9dfb40b 100644 --- a/drivers/ata/sata_sil.c +++ b/drivers/ata/sata_sil.c @@ -438,7 +438,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2) u8 status; if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) { - u32 serror; + u32 serror = 0xffffffff; /* SIEN doesn't mask SATA IRQs on some 3112s. Those * controllers continue to assert IRQ as long as -- cgit v0.10.2 From 6d0e194d2eefcaab6dbdca1f639748660144acb5 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 4 Aug 2011 11:15:07 +0200 Subject: pata_via: disable ATAPI DMA on AVERATEC 3200 On AVERATEC 3200, pata_via causes memory corruption with ATAPI DMA, which often leads to random kernel oops. The cause of the problem is not well understood yet and only small subset of machines using the controller seem affected. Blacklist ATAPI DMA on the machine. 
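The mechanism, condensed from the diff that follows:

	/* dmi_check_system() returns non-zero when the running board
	 * matches a blacklist entry; the mode filter then strips all
	 * DMA bits from the transfer-mode mask, leaving only PIO: */
	if (dev->class == ATA_DEV_ATAPI &&
	    dmi_check_system(no_atapi_dma_dmi_table))
		mask &= ATA_MASK_PIO;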
Signed-off-by: Tejun Heo Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=11426 Reported-and-tested-by: Jim Bray Cc: Alan Cox Cc: stable@kernel.org Signed-off-by: Jeff Garzik diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 65e4be6..8e9f504 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -124,6 +124,17 @@ static const struct via_isa_bridge { { NULL } }; +static const struct dmi_system_id no_atapi_dma_dmi_table[] = { + { + .ident = "AVERATEC 3200", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AVERATEC"), + DMI_MATCH(DMI_BOARD_NAME, "3200"), + }, + }, + { } +}; + struct via_port { u8 cached_device; }; @@ -355,6 +366,13 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask) mask &= ~ ATA_MASK_UDMA; } } + + if (dev->class == ATA_DEV_ATAPI && + dmi_check_system(no_atapi_dma_dmi_table)) { + ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n"); + mask &= ATA_MASK_PIO; + } + return mask; } -- cgit v0.10.2 From e39c75cf3e045c2fb3988770b207dfd09c30d4ac Mon Sep 17 00:00:00 2001 From: "Arnaud Patard (Rtp)" Date: Tue, 26 Jul 2011 16:58:19 +0200 Subject: ata: Add iMX pata support Add basic support for pata on iMX. It has been tested only on imx51. SDMA support will probably be added later so this version supports only PIO. v2: - enable only when needed IORDY - use dev_get_drvdata v3: - add missing clk_put() calls - use platform_get_irq() - fix resume code to avoid disabling IORDY on resume v4: - Remove EXPERIMENTAL and switch to depends on ARCH_MXC - Use devm_kzalloc() - make clock a must-have - Use only 1 ioremap Signed-off-by: Arnaud Patard Signed-off-by: Jeff Garzik diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index ca3e6be..5987e0b 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -468,6 +468,15 @@ config PATA_ICSIDE interface card. This is not required for ICS partition support. If you are unsure, say N to this. +config PATA_IMX + tristate "PATA support for Freescale iMX" + depends on ARCH_MXC + help + This option enables support for the PATA host available on Freescale + iMX SoCs. + + If unsure, say N. + config PATA_IT8213 tristate "IT8213 PATA support (Experimental)" depends on PCI && EXPERIMENTAL diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 8ac64e1..9550d69 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o +obj-$(CONFIG_PATA_IMX) += pata_imx.o obj-$(CONFIG_PATA_IT8213) += pata_it8213.o obj-$(CONFIG_PATA_IT821X) += pata_it821x.o obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c new file mode 100644 index 0000000..ca9d9ca --- /dev/null +++ b/drivers/ata/pata_imx.c @@ -0,0 +1,253 @@ +/* + * Freescale iMX PATA driver + * + * Copyright (C) 2011 Arnaud Patard + * + * Based on pata_platform - Copyright (C) 2006 - 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * TODO: + * - dmaengine support + * - check if timing stuff needed + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "pata_imx" + +#define PATA_IMX_ATA_CONTROL 0x24 +#define PATA_IMX_ATA_CTRL_FIFO_RST_B (1<<7) +#define PATA_IMX_ATA_CTRL_ATA_RST_B (1<<6) +#define PATA_IMX_ATA_CTRL_IORDY_EN (1<<0) +#define PATA_IMX_ATA_INT_EN 0x2C +#define PATA_IMX_ATA_INTR_ATA_INTRQ2 (1<<3) +#define PATA_IMX_DRIVE_DATA 0xA0 +#define PATA_IMX_DRIVE_CONTROL 0xD8 + +struct pata_imx_priv { + struct clk *clk; + /* timings/interrupt/control regs */ + u8 *host_regs; + u32 ata_ctl; +}; + +static int pata_imx_set_mode(struct ata_link *link, struct ata_device **unused) +{ + struct ata_device *dev; + struct ata_port *ap = link->ap; + struct pata_imx_priv *priv = ap->host->private_data; + u32 val; + + ata_for_each_dev(dev, link, ENABLED) { + dev->pio_mode = dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; + + val = __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); + if (ata_pio_need_iordy(dev)) + val |= PATA_IMX_ATA_CTRL_IORDY_EN; + else + val &= ~PATA_IMX_ATA_CTRL_IORDY_EN; + __raw_writel(val, priv->host_regs + PATA_IMX_ATA_CONTROL); + + ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); + } + return 0; +} + +static struct scsi_host_template pata_imx_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +static struct ata_port_operations pata_imx_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_unknown, + .set_mode = pata_imx_set_mode, +}; + +static void pata_imx_setup_port(struct ata_ioports *ioaddr) +{ + /* Fixup the port shift for platforms that need it */ + ioaddr->data_addr = ioaddr->cmd_addr + (ATA_REG_DATA << 2); + ioaddr->error_addr = ioaddr->cmd_addr + (ATA_REG_ERR << 2); + ioaddr->feature_addr = ioaddr->cmd_addr + (ATA_REG_FEATURE << 2); + ioaddr->nsect_addr = ioaddr->cmd_addr + (ATA_REG_NSECT << 2); + ioaddr->lbal_addr = ioaddr->cmd_addr + (ATA_REG_LBAL << 2); + ioaddr->lbam_addr = ioaddr->cmd_addr + (ATA_REG_LBAM << 2); + ioaddr->lbah_addr = ioaddr->cmd_addr + (ATA_REG_LBAH << 2); + ioaddr->device_addr = ioaddr->cmd_addr + (ATA_REG_DEVICE << 2); + ioaddr->status_addr = ioaddr->cmd_addr + (ATA_REG_STATUS << 2); + ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2); +} + +static int __devinit pata_imx_probe(struct platform_device *pdev) +{ + struct ata_host *host; + struct ata_port *ap; + struct pata_imx_priv *priv; + int irq = 0; + struct resource *io_res; + + io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (io_res == NULL) + return -EINVAL; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -EINVAL; + + priv = devm_kzalloc(&pdev->dev, + sizeof(struct pata_imx_priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "Failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + clk_enable(priv->clk); + + host = ata_host_alloc(&pdev->dev, 1); + if (!host) + goto free_priv; + + host->private_data = priv; + ap = host->ports[0]; + + ap->ops = &pata_imx_port_ops; + ap->pio_mask = ATA_PIO0; + ap->flags |= ATA_FLAG_SLAVE_POSS; + + priv->host_regs = devm_ioremap(&pdev->dev, io_res->start, + resource_size(io_res)); + if (!priv->host_regs) { + dev_err(&pdev->dev, "failed to map IO/CTL base\n"); + goto free_priv; + } + + ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA; + ap->ioaddr.ctl_addr = priv->host_regs + 
PATA_IMX_DRIVE_CONTROL; + + ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + + pata_imx_setup_port(&ap->ioaddr); + + ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx", + (unsigned long long)io_res->start + PATA_IMX_DRIVE_DATA, + (unsigned long long)io_res->start + PATA_IMX_DRIVE_CONTROL); + + /* deassert resets */ + __raw_writel(PATA_IMX_ATA_CTRL_FIFO_RST_B | + PATA_IMX_ATA_CTRL_ATA_RST_B, + priv->host_regs + PATA_IMX_ATA_CONTROL); + /* enable interrupts */ + __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2, + priv->host_regs + PATA_IMX_ATA_INT_EN); + + /* activate */ + return ata_host_activate(host, irq, ata_sff_interrupt, 0, + &pata_imx_sht); + +free_priv: + clk_disable(priv->clk); + clk_put(priv->clk); + return -ENOMEM; +} + +static int __devexit pata_imx_remove(struct platform_device *pdev) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct pata_imx_priv *priv = host->private_data; + + ata_host_detach(host); + + __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); + + clk_disable(priv->clk); + clk_put(priv->clk); + + return 0; +} + +#ifdef CONFIG_PM +static int pata_imx_suspend(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct pata_imx_priv *priv = host->private_data; + int ret; + + ret = ata_host_suspend(host, PMSG_SUSPEND); + if (!ret) { + __raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN); + priv->ata_ctl = + __raw_readl(priv->host_regs + PATA_IMX_ATA_CONTROL); + clk_disable(priv->clk); + } + + return ret; +} + +static int pata_imx_resume(struct device *dev) +{ + struct ata_host *host = dev_get_drvdata(dev); + struct pata_imx_priv *priv = host->private_data; + + clk_enable(priv->clk); + + __raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL); + + __raw_writel(PATA_IMX_ATA_INTR_ATA_INTRQ2, + priv->host_regs + PATA_IMX_ATA_INT_EN); + + ata_host_resume(host); + + return 0; +} + +static const struct dev_pm_ops pata_imx_pm_ops = { + .suspend = pata_imx_suspend, + .resume = pata_imx_resume, +}; +#endif + +static struct platform_driver pata_imx_driver = { + .probe = pata_imx_probe, + .remove = __devexit_p(pata_imx_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &pata_imx_pm_ops, +#endif + }, +}; + +static int __init pata_imx_init(void) +{ + return platform_driver_register(&pata_imx_driver); +} + +static void __exit pata_imx_exit(void) +{ + platform_driver_unregister(&pata_imx_driver); +} +module_init(pata_imx_init); +module_exit(pata_imx_exit); + +MODULE_AUTHOR("Arnaud Patard "); +MODULE_DESCRIPTION("low-level driver for iMX PATA"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRV_NAME); -- cgit v0.10.2 From a081da630d64acf132b2db1043c586b993d49da7 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 8 Aug 2011 13:17:57 +0200 Subject: drivers/ata/sata_dwc_460ex.c: add missing kfree Currently, error handling code in this function calls the function sata_dwc_port_stop, but this function has essentially no effect if hsdevp has not been stored in ap, which is the case throughout this function. The only effect is to print a debugging message including ap->print_id. The code is rewritten to not call sata_dwc_port_stop, but instead to jump to a local label that prints the original error message and the print_id information. In the case where hsdevp has been already allocated (but not yet stored in ap), this value is freed as well. 
A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @exists@ local idexpression x; statement S,S1; expression E; identifier fl; expression *ptr != NULL; @@ x = \(kmalloc\|kzalloc\|kcalloc\)(...); ... if (x == NULL) S <... when != x when != if (...) { <+...kfree(x)...+> } when any when != true x == NULL x->fl ...> ( if (x == NULL) S1 | if (...) { ... when != x when forall ( return \(0\|<+...x...+>\|ptr\); | * return ...; ) } ) // Signed-off-by: Julia Lawall Signed-off-by: Jeff Garzik diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index 0a9a774..5c42374 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -1329,7 +1329,7 @@ static int sata_dwc_port_start(struct ata_port *ap) dev_err(ap->dev, "%s: dma_alloc_coherent failed\n", __func__); err = -ENOMEM; - goto CLEANUP; + goto CLEANUP_ALLOC; } } @@ -1349,15 +1349,13 @@ static int sata_dwc_port_start(struct ata_port *ap) /* Clear any error bits before libata starts issuing commands */ clear_serror(); ap->private_data = hsdevp; + dev_dbg(ap->dev, "%s: done\n", __func__); + return 0; +CLEANUP_ALLOC: + kfree(hsdevp); CLEANUP: - if (err) { - sata_dwc_port_stop(ap); - dev_dbg(ap->dev, "%s: fail\n", __func__); - } else { - dev_dbg(ap->dev, "%s: done\n", __func__); - } - + dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id); return err; } -- cgit v0.10.2 From c5114cd59d2664f258b0d021d79b1532d94bdc2b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 18 Aug 2011 21:29:27 -0700 Subject: vlan: reset headers on accel emulation path It is, after all, necessary to reset the headers here. The reason is that we cannot rely on them being reset in __netif_receive_skb once the skb is reinjected. For incoming vlan ids without a vlan_dev, vlan_do_receive() returns false with skb != NULL and __netif_receive_skb continues; the skb is not reinjected. This might be good material for 3.0-stable as well. Reported-by: Mike Auty Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 5f27f8e..f1f2f7b 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -167,6 +167,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb) if (unlikely(!skb)) goto err_free; + skb_reset_network_header(skb); + skb_reset_transport_header(skb); return skb; err_free: -- cgit v0.10.2 From 2d5b2c5ca0d3ebe707386b3add365496460cf918 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 18 Aug 2011 21:30:37 -0700 Subject: net: netdev-features.txt update to Documentation/networking/00-INDEX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update netdev-features.txt entry in 00-INDEX to incorporate feedback by Michał Mirosław. v2: restored tabs that were inadvertently changed to spaces in v1. Sorry for the error. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index 811252b..bbce121 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -135,7 +135,7 @@ multiqueue.txt netconsole.txt - The network console module netconsole.ko: configuration and notes. netdev-features.txt - - Network interface "feature mess and how to get out from it alive". + - Network interface features API description. netdevices.txt - info on network device driver functions exported to the kernel.
netif-msg.txt -- cgit v0.10.2 From 4a0342ca8e8150bd47e7118a76e300692a1b6b7b Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Wed, 17 Aug 2011 22:14:57 +0000 Subject: sparc: fix array bounds error setting up PCIC NMI trap CC arch/sparc/kernel/pcic.o arch/sparc/kernel/pcic.c: In function 'pcic_probe': arch/sparc/kernel/pcic.c:359:33: error: array subscript is above array bounds [-Werror=array-bounds] arch/sparc/kernel/pcic.c:359:8: error: array subscript is above array bounds [-Werror=array-bounds] arch/sparc/kernel/pcic.c:360:33: error: array subscript is above array bounds [-Werror=array-bounds] arch/sparc/kernel/pcic.c:360:8: error: array subscript is above array bounds [-Werror=array-bounds] arch/sparc/kernel/pcic.c:361:33: error: array subscript is above array bounds [-Werror=array-bounds] arch/sparc/kernel/pcic.c:361:8: error: array subscript is above array bounds [-Werror=array-bounds] cc1: all warnings being treated as errors I'm not particularly familiar with sparc but t_nmi (defined in head_32.S via the TRAP_ENTRY macro) and pcic_nmi_trap_patch (defined in entry.S) both appear to be 4 instructions long and I presume from the usage that instructions are int sized. Signed-off-by: Ian Campbell Cc: "David S. Miller" Cc: sparclinux@vger.kernel.org Reviewed-by: Sam Ravnborg Signed-off-by: David S. Miller diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index a19f041..1aaf8c1 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -352,8 +352,8 @@ int __init pcic_probe(void) strcpy(pbm->prom_name, namebuf); { - extern volatile int t_nmi[1]; - extern int pcic_nmi_trap_patch[1]; + extern volatile int t_nmi[4]; + extern int pcic_nmi_trap_patch[4]; t_nmi[0] = pcic_nmi_trap_patch[0]; t_nmi[1] = pcic_nmi_trap_patch[1]; -- cgit v0.10.2 From 38b65190c6ab0be8ce7cff69e734ca5b5e7fa309 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Fri, 19 Aug 2011 07:55:10 +0200 Subject: ALSA: usb-audio - Fix missing mixer dB information The recent fix for testing dB range at the mixer creation time seems to cause regressions in some devices. In such devices, reading the dB info at probing time gives an error, thus both dBmin and dBmax are still zero, and TLV flag isn't set although the later read of dB info succeeds. This patch adds a workaround for such a case by assuming that the later read will succeed. In future, a similar test should be performed in a case where a wrong dB range is seen even in the later read. Signed-off-by: Takashi Iwai Cc: diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c04d7c7..cdd19d7 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -152,6 +152,7 @@ static inline void check_mapped_dB(const struct usbmix_name_map *p, if (p && p->dB) { cval->dBmin = p->dB->min; cval->dBmax = p->dB->max; + cval->initialized = 1; } } @@ -1092,7 +1093,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc, " Switch" : " Volume"); if (control == UAC_FU_VOLUME) { check_mapped_dB(map, cval); - if (cval->dBmin < cval->dBmax) { + if (cval->dBmin < cval->dBmax || !cval->initialized) { kctl->tlv.c = mixer_vol_tlv; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ | -- cgit v0.10.2 From b53d1ed734a2b9af8da115b836b658daa7d47a48 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 19 Aug 2011 08:34:48 +0200 Subject: Revert "cfq: Remove special treatment for metadata rqs." We have a kernel build regression since 3.1-rc1, which is about 10% regression. The kernel source is in an ext3 filesystem. 
Alex Shi bisected it to this commit: commit a07405b7802691d29ab3b23bdc76ee6d006aad0b Author: Justin TerAvest Date: Sun Jul 10 22:09:19 2011 +0200 cfq: Remove special treatment for metadata rqs. Apparently this is caused by the lack of metadata preemption, since ext3/ext4 do use READ_META. I didn't see a way to fix the issue, so I suggest reverting the patch. This reverts commit a07405b7802691d29ab3b23bdc76ee6d006aad0b. Reported-by: Alex Shi Reported-by: Shaohua Li Signed-off-by: Jens Axboe diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 6508345..a33bd43 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -130,6 +130,8 @@ struct cfq_queue { unsigned long slice_end; long slice_resid; + /* pending metadata requests */ + int meta_pending; /* number of requests that are on the dispatch list or inside driver */ int dispatched; @@ -682,6 +684,9 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, if (rq_is_sync(rq1) != rq_is_sync(rq2)) return rq_is_sync(rq1) ? rq1 : rq2; + if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META) + return rq1->cmd_flags & REQ_META ? rq1 : rq2; + s1 = blk_rq_pos(rq1); s2 = blk_rq_pos(rq2); @@ -1607,6 +1612,10 @@ static void cfq_remove_request(struct request *rq) cfqq->cfqd->rq_queued--; cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq), rq_is_sync(rq)); + if (rq->cmd_flags & REQ_META) { + WARN_ON(!cfqq->meta_pending); + cfqq->meta_pending--; + } } static int cfq_merge(struct request_queue *q, struct request **req, @@ -3360,6 +3369,13 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, return true; /* + * So both queues are sync. Let the new request get disk time if + * it's a metadata request and the current queue is doing regular IO. + */ + if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) + return true; + + /* * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. */ if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) @@ -3423,6 +3439,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_io_context *cic = RQ_CIC(rq); cfqd->rq_queued++; + if (rq->cmd_flags & REQ_META) + cfqq->meta_pending++; cfq_update_io_thinktime(cfqd, cfqq, cic); cfq_update_io_seektime(cfqd, cfqq, rq); -- cgit v0.10.2 From 98e77438aed3cd3343cbb86825127b1d9d2bea33 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Fri, 19 Aug 2011 03:19:07 -0700 Subject: ipv6: Fix ipv6_getsockopt for IPV6_2292PKTOPTIONS IPV6_2292PKTOPTIONS is broken for 32-bit applications running in COMPAT mode on 64-bit kernels. The same problem was fixed for IPv4 with the patch: ipv4: Fix ip_getsockopt for IP_PKTOPTIONS, commit dd23198e58cd35259dd09e8892bbdb90f1d57748 Signed-off-by: Sorin Dumitru Signed-off-by: Daniel Baluta Signed-off-by: David S. 
Miller diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 9cb191e..147ede38 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -913,7 +913,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, } static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) + char __user *optval, int __user *optlen, unsigned flags) { struct ipv6_pinfo *np = inet6_sk(sk); int len; @@ -962,7 +962,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, msg.msg_control = optval; msg.msg_controllen = len; - msg.msg_flags = 0; + msg.msg_flags = flags; lock_sock(sk); skb = np->pktoptions; @@ -1222,7 +1222,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, if(level != SOL_IPV6) return -ENOPROTOOPT; - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); + err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { @@ -1264,7 +1264,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, return compat_mc_getsockopt(sk, level, optname, optval, optlen, ipv6_getsockopt); - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen); + err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, + MSG_CMSG_COMPAT); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { -- cgit v0.10.2 From bb0822954aab7d23a3f902c2a103ee0242f6046e Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Tue, 16 Aug 2011 13:37:14 -0600 Subject: squeeze max-pause area and drop pass-good area Revert the pass-good area introduced in ffd1f609ab10 ("writeback: introduce max-pause and pass-good dirty limits") and make the max-pause area smaller and safe. This fixes ~30% performance regression in the ext3 data=writeback fio_mmap_randwrite_64k/fio_mmap_randrw_64k test cases, where there are 12 JBOD disks, on each disk runs 8 concurrent tasks doing reads+writes. Using deadline scheduler also has a regression, but not that big as CFQ, so this suggests we have some write starvation. The test logs show that - the disks are sometimes under utilized - global dirty pages sometimes rush high to the pass-good area for several hundred seconds, while in the mean time some bdi dirty pages drop to very low value (bdi_dirty << bdi_thresh). Then suddenly the global dirty pages dropped under global dirty threshold and bdi_dirty rush very high (for example, 2 times higher than bdi_thresh). During which time balance_dirty_pages() is not called at all. So the problems are 1) The random writes progress so slow that they break the assumption of the max-pause logic that "8 pages per 200ms is typically more than enough to curb heavy dirtiers". 2) The max-pause logic ignored task_bdi_thresh and thus opens the possibility for some bdi's to over dirty pages, leading to (bdi_dirty >> bdi_thresh) and then (bdi_thresh >> bdi_dirty) for others. 3) The higher max-pause/pass-good thresholds somehow leads to the bad swing of dirty pages. The fix is to allow the task to slightly dirty over task_bdi_thresh, but no way to exceed bdi_dirty and/or global dirty_thresh. Tests show that it fixed the JBOD regression completely (both behavior and performance), while still being able to cut down large pause times in balance_dirty_pages() for single-disk cases. 
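Condensed from the diff that follows, the single remaining max-pause escape only fires while both the global and the per-bdi limits still hold:

	if (nr_dirty < dirty_thresh &&
	    bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 &&
	    time_after(jiffies, start_time + MAX_PAUSE))
		break;	/* bounded pause, but no bdi may run away */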
Reported-by: Li Shaohua Tested-by: Li Shaohua Acked-by: Jan Kara Signed-off-by: Wu Fengguang diff --git a/include/linux/writeback.h b/include/linux/writeback.h index f1bfa12e..2b8963f 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -12,15 +12,6 @@ * * (thresh - thresh/DIRTY_FULL_SCOPE, thresh) * - * The 1/16 region above the global dirty limit will be put to maximum pauses: - * - * (limit, limit + limit/DIRTY_MAXPAUSE_AREA) - * - * The 1/16 region above the max-pause region, dirty exceeded bdi's will be put - * to loops: - * - * (limit + limit/DIRTY_MAXPAUSE_AREA, limit + limit/DIRTY_PASSGOOD_AREA) - * * Further beyond, all dirtier tasks will enter a loop waiting (possibly long * time) for the dirty pages to drop, unless written enough pages. * @@ -31,8 +22,6 @@ */ #define DIRTY_SCOPE 8 #define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2) -#define DIRTY_MAXPAUSE_AREA 16 -#define DIRTY_PASSGOOD_AREA 8 /* * 4MB minimal write chunk size diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d196074..0e309cd 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -754,21 +754,10 @@ static void balance_dirty_pages(struct address_space *mapping, * 200ms is typically more than enough to curb heavy dirtiers; * (b) the pause time limit makes the dirtiers more responsive. */ - if (nr_dirty < dirty_thresh + - dirty_thresh / DIRTY_MAXPAUSE_AREA && + if (nr_dirty < dirty_thresh && + bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && time_after(jiffies, start_time + MAX_PAUSE)) break; - /* - * pass-good area. When some bdi gets blocked (eg. NFS server - * not responding), or write bandwidth dropped dramatically due - * to concurrent reads, or dirty threshold suddenly dropped and - * the dirty pages cannot be brought down anytime soon (eg. on - * slow USB stick), at least let go of the good bdi's. 
- */ - if (nr_dirty < dirty_thresh + - dirty_thresh / DIRTY_PASSGOOD_AREA && - bdi_dirty < bdi_thresh) - break; /* * Increase the delay for each loop, up to our previous -- cgit v0.10.2 From 63b37de12889b7b96463b7d6de6d3f3704486b91 Mon Sep 17 00:00:00 2001 From: Dave Jones Date: Tue, 16 Aug 2011 15:36:21 -0400 Subject: cpupower: fix Makefile typo Signed-off-by: Dave Jones Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 11521d2..edb021c 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -35,7 +35,7 @@ NLS ?= true # Set the following to 'true' to build/install the # cpufreq-bench benchmarking tool -CPUFRQ_BENCH ?= true +CPUFREQ_BENCH ?= true # Prefix to the directories we're installing to DESTDIR ?= @@ -139,7 +139,7 @@ ifeq ($(strip $(NLS)),true) COMPILE_NLS += create-gmo endif -ifeq ($(strip $(CPUFRQ_BENCH)),true) +ifeq ($(strip $(CPUFREQ_BENCH)),true) INSTALL_BENCH += install-bench COMPILE_BENCH += compile-bench endif -- cgit v0.10.2 From 47c336307a3680cfdf4adbe718d79f3fe66702ea Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Fri, 19 Aug 2011 17:00:02 +0200 Subject: cpupower: make NLS truly optional Loosely based on a patch for cpufrequtils, submittted by Sergey Dryabzhinsky and signed-off-by: Matt Turner Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index edb021c..e8a03ac 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile @@ -137,6 +137,7 @@ CFLAGS += -pipe ifeq ($(strip $(NLS)),true) INSTALL_NLS += install-gmo COMPILE_NLS += create-gmo + CFLAGS += -DNLS endif ifeq ($(strip $(CPUFREQ_BENCH)),true) diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h index 7a83022..2747e73 100644 --- a/tools/power/cpupower/utils/helpers/helpers.h +++ b/tools/power/cpupower/utils/helpers/helpers.h @@ -16,11 +16,20 @@ #include "helpers/bitmask.h" /* Internationalization ****************************/ +#ifdef NLS + #define _(String) gettext(String) #ifndef gettext_noop #define gettext_noop(String) String #endif #define N_(String) gettext_noop(String) + +#else /* !NLS */ + +#define _(String) String +#define N_(String) String + +#endif /* Internationalization ****************************/ extern int run_as_root; -- cgit v0.10.2 From 498ca793d90aef8ad38a852a969c257f62832738 Mon Sep 17 00:00:00 2001 From: Dominik Brodowski Date: Sat, 6 Aug 2011 18:11:43 +0200 Subject: cpupower: use man(1) when calling "cpupower help subcommand" Instead of printing something non-formatted to stdout, call man(1) to show the man page for the proper subcommand. Signed-off-by: Dominik Brodowski diff --git a/tools/power/cpupower/man/cpupower-frequency-info.1 b/tools/power/cpupower/man/cpupower-frequency-info.1 index 3194811..bb60a8d 100644 --- a/tools/power/cpupower/man/cpupower-frequency-info.1 +++ b/tools/power/cpupower/man/cpupower-frequency-info.1 @@ -1,10 +1,10 @@ -.TH "cpufreq-info" "1" "0.1" "Mattia Dongili" "" +.TH "cpupower-frequency-info" "1" "0.1" "Mattia Dongili" "" .SH "NAME" .LP -cpufreq\-info \- Utility to retrieve cpufreq kernel information +cpupower frequency\-info \- Utility to retrieve cpufreq kernel information .SH "SYNTAX" .LP -cpufreq\-info [\fIoptions\fP] +cpupower [ \-c cpulist ] frequency\-info [\fIoptions\fP] .SH "DESCRIPTION" .LP A small tool which prints out cpufreq information helpful to developers and interested users. 
diff --git a/tools/power/cpupower/man/cpupower-frequency-set.1 b/tools/power/cpupower/man/cpupower-frequency-set.1 index 26e3e13..685f469 100644 --- a/tools/power/cpupower/man/cpupower-frequency-set.1 +++ b/tools/power/cpupower/man/cpupower-frequency-set.1 @@ -1,13 +1,13 @@ -.TH "cpufreq-set" "1" "0.1" "Mattia Dongili" "" +.TH "cpupower-freqency-set" "1" "0.1" "Mattia Dongili" "" .SH "NAME" .LP -cpufreq\-set \- A small tool which allows to modify cpufreq settings. +cpupower frequency\-set \- A small tool which allows to modify cpufreq settings. .SH "SYNTAX" .LP -cpufreq\-set [\fIoptions\fP] +cpupower [ \-c cpu ] frequency\-set [\fIoptions\fP] .SH "DESCRIPTION" .LP -cpufreq\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. +cpupower frequency\-set allows you to modify cpufreq settings without having to type e.g. "/sys/devices/system/cpu/cpu0/cpufreq/scaling_set_speed" all the time. .SH "OPTIONS" .LP .TP diff --git a/tools/power/cpupower/man/cpupower.1 b/tools/power/cpupower/man/cpupower.1 index 78c20fe..baf741d 100644 --- a/tools/power/cpupower/man/cpupower.1 +++ b/tools/power/cpupower/man/cpupower.1 @@ -3,7 +3,7 @@ cpupower \- Shows and sets processor power related values .SH SYNOPSIS .ft B -.B cpupower [ \-c cpulist ] subcommand [ARGS] +.B cpupower [ \-c cpulist ] [ARGS] .B cpupower \-v|\-\-version @@ -13,24 +13,24 @@ cpupower \- Shows and sets processor power related values \fBcpupower \fP is a collection of tools to examine and tune power saving related features of your processor. -The manpages of the subcommands (cpupower\-(1)) provide detailed +The manpages of the commands (cpupower\-(1)) provide detailed descriptions of supported features. Run \fBcpupower help\fP to get an overview -of supported subcommands. +of supported commands. .SH Options .PP \-\-help, \-h .RS 4 -Shows supported subcommands and general usage. +Shows supported commands and general usage. .RE .PP \-\-cpu cpulist, \-c cpulist .RS 4 Only show or set values for specific cores. -This option is not supported by all subcommands, details can be found in the -manpages of the subcommands. +This option is not supported by all commands, details can be found in the +manpages of the commands. -Some subcommands access all cores (typically the *\-set commands), some only +Some commands access all cores (typically the *\-set commands), some only the first core (typically the *\-info commands) by default. 
The syntax for is based on how the kernel exports CPU bitmasks via diff --git a/tools/power/cpupower/utils/builtin.h b/tools/power/cpupower/utils/builtin.h index c870ffb..c10496f 100644 --- a/tools/power/cpupower/utils/builtin.h +++ b/tools/power/cpupower/utils/builtin.h @@ -8,11 +8,4 @@ extern int cmd_freq_info(int argc, const char **argv); extern int cmd_idle_info(int argc, const char **argv); extern int cmd_monitor(int argc, const char **argv); -extern void set_help(void); -extern void info_help(void); -extern void freq_set_help(void); -extern void freq_info_help(void); -extern void idle_info_help(void); -extern void monitor_help(void); - #endif diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c index 5a1d25f..28953c9 100644 --- a/tools/power/cpupower/utils/cpufreq-info.c +++ b/tools/power/cpupower/utils/cpufreq-info.c @@ -510,37 +510,6 @@ static int get_latency(unsigned int cpu, unsigned int human) return 0; } -void freq_info_help(void) -{ - printf(_("Usage: cpupower freqinfo [options]\n")); - printf(_("Options:\n")); - printf(_(" -e, --debug Prints out debug information [default]\n")); - printf(_(" -f, --freq Get frequency the CPU currently runs at, according\n" - " to the cpufreq core *\n")); - printf(_(" -w, --hwfreq Get frequency the CPU currently runs at, by reading\n" - " it from hardware (only available to root) *\n")); - printf(_(" -l, --hwlimits Determine the minimum and maximum CPU frequency allowed *\n")); - printf(_(" -d, --driver Determines the used cpufreq kernel driver *\n")); - printf(_(" -p, --policy Gets the currently used cpufreq policy *\n")); - printf(_(" -g, --governors Determines available cpufreq governors *\n")); - printf(_(" -r, --related-cpus Determines which CPUs run at the same hardware frequency *\n")); - printf(_(" -a, --affected-cpus Determines which CPUs need to have their frequency\n" - " coordinated by software *\n")); - printf(_(" -s, --stats Shows cpufreq statistics if available\n")); - printf(_(" -y, --latency Determines the maximum latency on CPU frequency changes *\n")); - printf(_(" -b, --boost Checks for turbo or boost modes *\n")); - printf(_(" -o, --proc Prints out information like provided by the /proc/cpufreq\n" - " interface in 2.4. and early 2.6. kernels\n")); - printf(_(" -m, --human human-readable output for the -f, -w, -s and -y parameters\n")); - printf(_(" -h, --help Prints out this screen\n")); - - printf("\n"); - printf(_("If no argument is given, full output about\n" - "cpufreq is printed which is useful e.g. 
for reporting bugs.\n\n")); - printf(_("By default info of CPU 0 is shown which can be overridden\n" - "with the cpupower --cpu main command option.\n")); -} - static struct option info_opts[] = { { .name = "debug", .has_arg = no_argument, .flag = NULL, .val = 'e'}, { .name = "boost", .has_arg = no_argument, .flag = NULL, .val = 'b'}, @@ -556,7 +525,6 @@ static struct option info_opts[] = { { .name = "latency", .has_arg = no_argument, .flag = NULL, .val = 'y'}, { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, { .name = "human", .has_arg = no_argument, .flag = NULL, .val = 'm'}, - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, { }, }; @@ -570,16 +538,12 @@ int cmd_freq_info(int argc, char **argv) int output_param = 0; do { - ret = getopt_long(argc, argv, "hoefwldpgrasmyb", info_opts, NULL); + ret = getopt_long(argc, argv, "oefwldpgrasmyb", info_opts, NULL); switch (ret) { case '?': output_param = '?'; cont = 0; break; - case 'h': - output_param = 'h'; - cont = 0; - break; case -1: cont = 0; break; @@ -642,11 +606,7 @@ int cmd_freq_info(int argc, char **argv) return -EINVAL; case '?': printf(_("invalid or unknown argument\n")); - freq_info_help(); return -EINVAL; - case 'h': - freq_info_help(); - return EXIT_SUCCESS; case 'o': proc_cpufreq_output(); return EXIT_SUCCESS; diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c index 5f78362..dd1539e 100644 --- a/tools/power/cpupower/utils/cpufreq-set.c +++ b/tools/power/cpupower/utils/cpufreq-set.c @@ -20,34 +20,11 @@ #define NORM_FREQ_LEN 32 -void freq_set_help(void) -{ - printf(_("Usage: cpupower frequency-set [options]\n")); - printf(_("Options:\n")); - printf(_(" -d FREQ, --min FREQ new minimum CPU frequency the governor may select\n")); - printf(_(" -u FREQ, --max FREQ new maximum CPU frequency the governor may select\n")); - printf(_(" -g GOV, --governor GOV new cpufreq governor\n")); - printf(_(" -f FREQ, --freq FREQ specific frequency to be set. Requires userspace\n" - " governor to be available and loaded\n")); - printf(_(" -r, --related Switches all hardware-related CPUs\n")); - printf(_(" -h, --help Prints out this screen\n")); - printf("\n"); - printf(_("Notes:\n" - "1. Omitting the -c or --cpu argument is equivalent to setting it to \"all\"\n")); - printf(_("2. The -f FREQ, --freq FREQ parameter cannot be combined with any other parameter\n" - " except the -c CPU, --cpu CPU parameter\n" - "3. 
FREQuencies can be passed in Hz, kHz (default), MHz, GHz, or THz\n" - " by postfixing the value with the wanted unit name, without any space\n" - " (FREQuency in kHz =^ Hz * 0.001 =^ MHz * 1000 =^ GHz * 1000000).\n")); - -} - static struct option set_opts[] = { { .name = "min", .has_arg = required_argument, .flag = NULL, .val = 'd'}, { .name = "max", .has_arg = required_argument, .flag = NULL, .val = 'u'}, { .name = "governor", .has_arg = required_argument, .flag = NULL, .val = 'g'}, { .name = "freq", .has_arg = required_argument, .flag = NULL, .val = 'f'}, - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, { .name = "related", .has_arg = no_argument, .flag = NULL, .val='r'}, { }, }; @@ -80,7 +57,6 @@ const struct freq_units def_units[] = { static void print_unknown_arg(void) { printf(_("invalid or unknown argument\n")); - freq_set_help(); } static unsigned long string_to_frequency(const char *str) @@ -231,14 +207,11 @@ int cmd_freq_set(int argc, char **argv) /* parameter parsing */ do { - ret = getopt_long(argc, argv, "d:u:g:f:hr", set_opts, NULL); + ret = getopt_long(argc, argv, "d:u:g:f:r", set_opts, NULL); switch (ret) { case '?': print_unknown_arg(); return -EINVAL; - case 'h': - freq_set_help(); - return 0; case -1: cont = 0; break; diff --git a/tools/power/cpupower/utils/cpuidle-info.c b/tools/power/cpupower/utils/cpuidle-info.c index 70da357..b028267 100644 --- a/tools/power/cpupower/utils/cpuidle-info.c +++ b/tools/power/cpupower/utils/cpuidle-info.c @@ -139,30 +139,14 @@ static void proc_cpuidle_cpu_output(unsigned int cpu) } } -/* --freq / -f */ - -void idle_info_help(void) -{ - printf(_ ("Usage: cpupower idleinfo [options]\n")); - printf(_ ("Options:\n")); - printf(_ (" -s, --silent Only show general C-state information\n")); - printf(_ (" -o, --proc Prints out information like provided by the /proc/acpi/processor/*/power\n" - " interface in older kernels\n")); - printf(_ (" -h, --help Prints out this screen\n")); - - printf("\n"); -} - static struct option info_opts[] = { { .name = "silent", .has_arg = no_argument, .flag = NULL, .val = 's'}, { .name = "proc", .has_arg = no_argument, .flag = NULL, .val = 'o'}, - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, { }, }; static inline void cpuidle_exit(int fail) { - idle_info_help(); exit(EXIT_FAILURE); } @@ -174,7 +158,7 @@ int cmd_idle_info(int argc, char **argv) unsigned int cpu = 0; do { - ret = getopt_long(argc, argv, "hos", info_opts, NULL); + ret = getopt_long(argc, argv, "os", info_opts, NULL); if (ret == -1) break; switch (ret) { @@ -182,10 +166,6 @@ int cmd_idle_info(int argc, char **argv) output_param = '?'; cont = 0; break; - case 'h': - output_param = 'h'; - cont = 0; - break; case 's': verbose = 0; break; @@ -211,8 +191,6 @@ int cmd_idle_info(int argc, char **argv) case '?': printf(_("invalid or unknown argument\n")); cpuidle_exit(EXIT_FAILURE); - case 'h': - cpuidle_exit(EXIT_SUCCESS); } /* Default is: show output of CPU 0 only */ diff --git a/tools/power/cpupower/utils/cpupower-info.c b/tools/power/cpupower/utils/cpupower-info.c index 85253cb..3f68632 100644 --- a/tools/power/cpupower/utils/cpupower-info.c +++ b/tools/power/cpupower/utils/cpupower-info.c @@ -16,31 +16,16 @@ #include "helpers/helpers.h" #include "helpers/sysfs.h" -void info_help(void) -{ - printf(_("Usage: cpupower info [ -b ] [ -m ] [ -s ]\n")); - printf(_("Options:\n")); - printf(_(" -b, --perf-bias Gets CPU's power vs performance policy on some\n" - " Intel models [0-15], see manpage for details\n")); - 
printf(_(" -m, --sched-mc Gets the kernel's multi core scheduler policy.\n")); - printf(_(" -s, --sched-smt Gets the kernel's thread sibling scheduler policy.\n")); - printf(_(" -h, --help Prints out this screen\n")); - printf(_("\nPassing no option will show all info, by default only on core 0\n")); - printf("\n"); -} - static struct option set_opts[] = { { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, { }, }; static void print_wrong_arg_exit(void) { printf(_("invalid or unknown argument\n")); - info_help(); exit(EXIT_FAILURE); } @@ -64,11 +49,8 @@ int cmd_info(int argc, char **argv) textdomain(PACKAGE); /* parameter parsing */ - while ((ret = getopt_long(argc, argv, "msbh", set_opts, NULL)) != -1) { + while ((ret = getopt_long(argc, argv, "msb", set_opts, NULL)) != -1) { switch (ret) { - case 'h': - info_help(); - return 0; case 'b': if (params.perf_bias) print_wrong_arg_exit(); diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c index bc1b391..dc4de37 100644 --- a/tools/power/cpupower/utils/cpupower-set.c +++ b/tools/power/cpupower/utils/cpupower-set.c @@ -17,30 +17,16 @@ #include "helpers/sysfs.h" #include "helpers/bitmask.h" -void set_help(void) -{ - printf(_("Usage: cpupower set [ -b val ] [ -m val ] [ -s val ]\n")); - printf(_("Options:\n")); - printf(_(" -b, --perf-bias [VAL] Sets CPU's power vs performance policy on some\n" - " Intel models [0-15], see manpage for details\n")); - printf(_(" -m, --sched-mc [VAL] Sets the kernel's multi core scheduler policy.\n")); - printf(_(" -s, --sched-smt [VAL] Sets the kernel's thread sibling scheduler policy.\n")); - printf(_(" -h, --help Prints out this screen\n")); - printf("\n"); -} - static struct option set_opts[] = { { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, - { .name = "help", .has_arg = no_argument, .flag = NULL, .val = 'h'}, { }, }; static void print_wrong_arg_exit(void) { printf(_("invalid or unknown argument\n")); - set_help(); exit(EXIT_FAILURE); } @@ -66,12 +52,9 @@ int cmd_set(int argc, char **argv) params.params = 0; /* parameter parsing */ - while ((ret = getopt_long(argc, argv, "m:s:b:h", + while ((ret = getopt_long(argc, argv, "m:s:b:", set_opts, NULL)) != -1) { switch (ret) { - case 'h': - set_help(); - return 0; case 'b': if (params.perf_bias) print_wrong_arg_exit(); @@ -110,10 +93,8 @@ int cmd_set(int argc, char **argv) } }; - if (!params.params) { - set_help(); - return -EINVAL; - } + if (!params.params) + print_wrong_arg_exit(); if (params.sched_mc) { ret = sysfs_set_sched("mc", sched_mc); diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c index 5844ae0..52bee59 100644 --- a/tools/power/cpupower/utils/cpupower.c +++ b/tools/power/cpupower/utils/cpupower.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "builtin.h" #include "helpers/helpers.h" @@ -19,13 +20,12 @@ struct cmd_struct { const char *cmd; int (*main)(int, const char **); - void (*usage)(void); int needs_root; }; #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) -int cmd_help(int argc, const char **argv); +static 
int cmd_help(int argc, const char **argv); /* Global cpu_info object available for all binaries * Info only retrieved from CPU 0 @@ -44,55 +44,66 @@ int be_verbose; static void print_help(void); static struct cmd_struct commands[] = { - { "frequency-info", cmd_freq_info, freq_info_help, 0 }, - { "frequency-set", cmd_freq_set, freq_set_help, 1 }, - { "idle-info", cmd_idle_info, idle_info_help, 0 }, - { "set", cmd_set, set_help, 1 }, - { "info", cmd_info, info_help, 0 }, - { "monitor", cmd_monitor, monitor_help, 0 }, - { "help", cmd_help, print_help, 0 }, - /* { "bench", cmd_bench, NULL, 1 }, */ + { "frequency-info", cmd_freq_info, 0 }, + { "frequency-set", cmd_freq_set, 1 }, + { "idle-info", cmd_idle_info, 0 }, + { "set", cmd_set, 1 }, + { "info", cmd_info, 0 }, + { "monitor", cmd_monitor, 0 }, + { "help", cmd_help, 0 }, + /* { "bench", cmd_bench, 1 }, */ }; -int cmd_help(int argc, const char **argv) -{ - unsigned int i; - - if (argc > 1) { - for (i = 0; i < ARRAY_SIZE(commands); i++) { - struct cmd_struct *p = commands + i; - if (strcmp(p->cmd, argv[1])) - continue; - if (p->usage) { - p->usage(); - return EXIT_SUCCESS; - } - } - } - print_help(); - if (argc == 1) - return EXIT_SUCCESS; /* cpupower help */ - return EXIT_FAILURE; -} - static void print_help(void) { unsigned int i; #ifdef DEBUG - printf(_("cpupower [ -d ][ -c cpulist ] subcommand [ARGS]\n")); - printf(_(" -d, --debug May increase output (stderr) on some subcommands\n")); + printf(_("Usage:\tcpupower [-d|--debug] [-c|--cpu cpulist ] []\n")); #else - printf(_("cpupower [ -c cpulist ] subcommand [ARGS]\n")); + printf(_("Usage:\tcpupower [-c|--cpu cpulist ] []\n")); #endif - printf(_("cpupower --version\n")); - printf(_("Supported subcommands are:\n")); + printf(_("Supported commands are:\n")); for (i = 0; i < ARRAY_SIZE(commands); i++) printf("\t%s\n", commands[i].cmd); - printf(_("\nSome subcommands can make use of the -c cpulist option.\n")); - printf(_("Look at the general cpupower manpage how to use it\n")); - printf(_("and read up the subcommand's manpage whether it is supported.\n")); - printf(_("\nUse cpupower help subcommand for getting help for above subcommands.\n")); + printf(_("\nNot all commands can make use of the -c cpulist option.\n")); + printf(_("\nUse 'cpupower help ' for getting help for above commands.\n")); +} + +static int print_man_page(const char *subpage) +{ + int len; + char *page; + + len = 10; /* enough for "cpupower-" */ + if (subpage != NULL) + len += strlen(subpage); + + page = malloc(len); + if (!page) + return -ENOMEM; + + sprintf(page, "cpupower"); + if ((subpage != NULL) && strcmp(subpage, "help")) { + strcat(page, "-"); + strcat(page, subpage); + } + + execlp("man", "man", page, NULL); + + /* should not be reached */ + return -EINVAL; +} + +static int cmd_help(int argc, const char **argv) +{ + if (argc > 1) { + print_man_page(argv[1]); /* exits within execlp() */ + return EXIT_FAILURE; + } + + print_help(); + return EXIT_SUCCESS; } static void print_version(void) diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c index 6cb8d9e..0d6571e 100644 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c @@ -43,6 +43,12 @@ static struct cpupower_topology cpu_top; /* ToDo: Document this in the manpage */ static char range_abbr[RANGE_MAX] = { 'T', 'C', 'P', 'M', }; +static void print_wrong_arg_exit(void) +{ + printf(_("invalid or unknown 
argument\n")); + exit(EXIT_FAILURE); +} + long long timespec_diff_us(struct timespec start, struct timespec end) { struct timespec temp; @@ -56,21 +62,6 @@ long long timespec_diff_us(struct timespec start, struct timespec end) return (temp.tv_sec * 1000000) + (temp.tv_nsec / 1000); } -void monitor_help(void) -{ - printf(_("cpupower monitor: [-m ,[],.. ] command\n")); - printf(_("cpupower monitor: [-m ,[],.. ] [ -i interval_sec ]\n")); - printf(_("cpupower monitor: -l\n")); - printf(_("\t command: pass an arbitrary command to measure specific workload\n")); - printf(_("\t -i: time intervall to measure for in seconds (default 1)\n")); - printf(_("\t -l: list available CPU sleep monitors (for use with -m)\n")); - printf(_("\t -m: show specific CPU sleep monitors only (in same order)\n")); - printf(_("\t -h: print this help\n")); - printf("\n"); - printf(_("only one of: -l, -m are allowed\nIf none of them is passed,")); - printf(_(" all supported monitors are shown\n")); -} - void print_n_spaces(int n) { int x; @@ -246,7 +237,6 @@ static void parse_monitor_param(char *param) if (hits == 0) { printf(_("No matching monitor found in %s, " "try -l option\n"), param); - monitor_help(); exit(EXIT_FAILURE); } /* Override detected/registerd monitors array with requested one */ @@ -343,37 +333,27 @@ static void cmdline(int argc, char *argv[]) int opt; progname = basename(argv[0]); - while ((opt = getopt(argc, argv, "+hli:m:")) != -1) { + while ((opt = getopt(argc, argv, "+li:m:")) != -1) { switch (opt) { - case 'h': - monitor_help(); - exit(EXIT_SUCCESS); case 'l': - if (mode) { - monitor_help(); - exit(EXIT_FAILURE); - } + if (mode) + print_wrong_arg_exit(); mode = list; break; case 'i': /* only allow -i with -m or no option */ - if (mode && mode != show) { - monitor_help(); - exit(EXIT_FAILURE); - } + if (mode && mode != show) + print_wrong_arg_exit(); interval = atoi(optarg); break; case 'm': - if (mode) { - monitor_help(); - exit(EXIT_FAILURE); - } + if (mode) + print_wrong_arg_exit(); mode = show; show_monitors_param = optarg; break; default: - monitor_help(); - exit(EXIT_FAILURE); + print_wrong_arg_exit(); } } if (!mode) -- cgit v0.10.2 From 69566dd8be42dea7a22f625abc96e65bb4b45d1f Mon Sep 17 00:00:00 2001 From: David Daney Date: Tue, 16 Aug 2011 11:24:37 -0700 Subject: PCI: OF: Don't crash when bridge parent is NULL. In pcibios_get_phb_of_node(), we will crash while booting if bus->bridge->parent is NULL. Check for this case and avoid dereferencing the NULL pointer. Signed-off-by: David Daney Acked-by: Benjamin Herrenschmidt Acked-by: Grant Likely Signed-off-by: Jesse Barnes diff --git a/drivers/pci/of.c b/drivers/pci/of.c index c94d37e..f092993 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -55,7 +55,7 @@ struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) */ if (bus->bridge->of_node) return of_node_get(bus->bridge->of_node); - if (bus->bridge->parent->of_node) + if (bus->bridge->parent && bus->bridge->parent->of_node) return of_node_get(bus->bridge->parent->of_node); return NULL; } -- cgit v0.10.2 From d555ab6bb321814853ca8a8d4e8e22d52e18a871 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 29 Jul 2011 21:11:43 -0700 Subject: max8998_charger: Needs module.h power/max8998_charger.c uses interfaces from linux/module.h, so it should include that file. This fixes build errors. 
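As background for this group of module.h fixups (a generic sketch, not the charger driver itself): MODULE_AUTHOR(), MODULE_DESCRIPTION(), MODULE_LICENSE() and the module_init()/module_exit() hooks are declared via <linux/module.h>, so a driver using any of them must include that header explicitly rather than rely on it arriving through some other include. A minimal hypothetical module showing the dependency:

#include <linux/module.h>       /* provides the MODULE_* macros below */
#include <linux/init.h>

static int __init demo_init(void)
{
        return 0;       /* nothing to do; this only demonstrates the includes */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);

/* Without <linux/module.h> these macros are undeclared identifiers,
 * which is what produces the "data definition has no type or storage
 * class" errors quoted in the s3c-adc-battery patch below. */
MODULE_AUTHOR("Example Author");
MODULE_DESCRIPTION("Sketch: why drivers must include linux/module.h");
MODULE_LICENSE("GPL");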
Signed-off-by: Randy Dunlap Signed-off-by: Anton Vorontsov diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c index cc21fa2..ef8efad 100644 --- a/drivers/power/max8998_charger.c +++ b/drivers/power/max8998_charger.c @@ -20,6 +20,7 @@ */ #include +#include #include #include #include -- cgit v0.10.2 From 71aa79a8c2537eb07cd26b5e4dc43274a9c10692 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Mon, 1 Aug 2011 07:29:31 +0800 Subject: max8997_charger: Needs module.h power/max8997_charger.c uses interfaces from linux/module.h, so it should include that file. This fixes build errors. Signed-off-by: Axel Lin Acked-by: MyungJoo Ham Signed-off-by: Anton Vorontsov diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c index 7106b49..ffc5033 100644 --- a/drivers/power/max8997_charger.c +++ b/drivers/power/max8997_charger.c @@ -20,6 +20,7 @@ */ #include +#include #include #include #include -- cgit v0.10.2 From 815efa1eab5b0c3e071e5d6df0cc2d7e0c7e6fd7 Mon Sep 17 00:00:00 2001 From: Vasily Khoruzhick Date: Fri, 12 Aug 2011 17:55:18 +0300 Subject: s3c-adc-battery: Fix compilation error due to missing header (module.h) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add linux/module.h to fix this compilation error: drivers/power/s3c_adc_battery.c:435:15: error: expected declaration specifiers or ‘...’ before string constant drivers/power/s3c_adc_battery.c:435:1: warning: data definition has no type or storage class drivers/power/s3c_adc_battery.c:435:1: warning: type defaults to ‘int’ in declaration of ‘MODULE_AUTHOR’ drivers/power/s3c_adc_battery.c:435:15: warning: function declaration isn’t a prototype drivers/power/s3c_adc_battery.c:436:20: error: expected declaration specifiers or ‘...’ before string constant drivers/power/s3c_adc_battery.c:436:1: warning: data definition has no type or storage class drivers/power/s3c_adc_battery.c:436:1: warning: type defaults to ‘int’ in declaration of ‘MODULE_DESCRIPTION’ drivers/power/s3c_adc_battery.c:436:20: warning: function declaration isn’t a prototype drivers/power/s3c_adc_battery.c:437:16: error: expected declaration specifiers or ‘...’ before string constant drivers/power/s3c_adc_battery.c:437:1: warning: data definition has no type or storage class drivers/power/s3c_adc_battery.c:437:1: warning: type defaults to ‘int’ in declaration of ‘MODULE_LICENSE’ drivers/power/s3c_adc_battery.c:437:16: warning: function declaration isn’t a prototype make[2]: *** [drivers/power/s3c_adc_battery.o] Error 1 Signed-off-by: Vasily Khoruzhick Signed-off-by: Ian Lartey Signed-off-by: Anton Vorontsov diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c index a675e31..d32d0d7 100644 --- a/drivers/power/s3c_adc_battery.c +++ b/drivers/power/s3c_adc_battery.c @@ -20,6 +20,7 @@ #include #include #include +#include #include -- cgit v0.10.2 From b095cd0a0ccdbc00c9fd99d90b22f8563687971f Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 12 Aug 2011 15:28:32 -0700 Subject: drm/i915: set GFX_MODE to pre-Ivybridge default value even on Ivybridge Prior to Ivybridge, the GFX_MODE would default to 0x800, meaning that MI_FLUSH would flush the TLBs in addition to the rest of the caches indicated in the MI_FLUSH command. However starting with Ivybridge, the register defaults to 0x2800 out of reset, meaning that to invalidate the TLB we need to use PIPE_CONTROL. Since we're not doing that yet, go back to the old default so things work. 
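A side note on the register interface, hedged since the patch itself does not spell it out: GFX_MODE-style registers on these GPUs follow a masked-write convention in which the top 16 bits of the written value select which of the bottom 16 bits actually change, and the GFX_MODE_ENABLE()/GFX_MODE_DISABLE() macros added below encode exactly that. A small user-space model of the convention (reg_masked_write() is invented for illustration, not an i915 function):

#define DEMO_MODE_ENABLE(bit)   (((bit) << 16) | (bit))
#define DEMO_MODE_DISABLE(bit)  (((bit) << 16) | (0))

/* Model of how the hardware applies a masked write: only bits whose
 * mask (high half of the value) is set may change; all others keep
 * their current contents. */
static unsigned int reg_masked_write(unsigned int old, unsigned int val)
{
        unsigned int mask = val >> 16;

        return (old & ~mask) | (val & mask);
}

With this convention a single write can clear one mode bit and set another without a read-modify-write cycle, which is why init_render_ring() in the diff below can flip both bits in one I915_WRITE().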
v2: don't forget to actually *clear* the new bit Reviewed-by: Eric Anholt Reviewed-by: Chris Wilson Tested-by: Kenneth Graunke Signed-off-by: Jesse Barnes diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5baaef4..542453f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -375,6 +375,7 @@ # define MI_FLUSH_ENABLE (1 << 11) #define GFX_MODE 0x02520 +#define GFX_MODE_GEN7 0x0229c #define GFX_RUN_LIST_ENABLE (1<<15) #define GFX_TLB_INVALIDATE_ALWAYS (1<<13) #define GFX_SURFACE_FAULT_ENABLE (1<<12) @@ -382,6 +383,9 @@ #define GFX_PSMI_GRANULARITY (1<<10) #define GFX_PPGTT_ENABLE (1<<9) +#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit)) +#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0)) + #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 47b9b27..c30626e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -290,6 +290,10 @@ static int init_render_ring(struct intel_ring_buffer *ring) if (IS_GEN6(dev) || IS_GEN7(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; I915_WRITE(MI_MODE, mode); + if (IS_GEN7(dev)) + I915_WRITE(GFX_MODE_GEN7, + GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | + GFX_MODE_ENABLE(GFX_REPLAY_MODE)); } if (INTEL_INFO(dev)->gen >= 6) { -- cgit v0.10.2 From dccaf33fa37a1bc5d651baeb3bfeb6becb86597b Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Fri, 19 Aug 2011 19:13:32 -0400 Subject: ext4: flush any pending end_io requests before DIO reads w/dioread_nolock There is a race between ext4 buffer write and direct_IO read with dioread_nolock mount option enabled. The problem is that we clear PageWriteback flag during end_io time but will do uninitialized-to-initialized extent conversion later with dioread_nolock. If an O_direct read request comes in during this period, ext4 will return zero instead of the recently written data. This patch checks whether there are any pending uninitialized-to-initialized extent conversion requests before doing O_direct read to close the race. Note that this is just a bandaid fix. The fundamental issue is that we clear PageWriteback flag before we really complete an IO, which is problem-prone. To fix the fundamental issue, we may need to implement an extent tree cache that we can use to look up pending to-be-converted extents. Signed-off-by: Jiaying Zhang Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b8602cd..0962642 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -800,12 +800,17 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, } retry: - if (rw == READ && ext4_should_dioread_nolock(inode)) + if (rw == READ && ext4_should_dioread_nolock(inode)) { + if (unlikely(!list_empty(&ei->i_completed_io_list))) { + mutex_lock(&inode->i_mutex); + ext4_flush_completed_IO(inode); + mutex_unlock(&inode->i_mutex); + } ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, ext4_get_block, NULL, NULL, 0); - else { + } else { ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, ext4_get_block); -- cgit v0.10.2 From b6acf013bdc6f6ff9643030add85832d44034a28 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sat, 20 Aug 2011 09:14:45 +0200 Subject: ALSA: hda - Don't spew too many ELD errors Currently HD-audio driver shows the all error ELD byte as an error in the kernel message. 
This is annoying when the video driver doesn't set the correct ELD from the beginning. e.g. radeon sends a zero-byte data, but we still check ELD with the fixed 128 byte as a workaround for some broken devices, it spews 128-times errors. For avoiding this, the driver aborts reading when the first byte is invalid. In such a case, the whole data is certainly invalid. Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 28ce17d..c34f730 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -144,25 +144,17 @@ static int cea_sampling_frequencies[8] = { SNDRV_PCM_RATE_192000, /* 7: 192000Hz */ }; -static unsigned char hdmi_get_eld_byte(struct hda_codec *codec, hda_nid_t nid, +static unsigned int hdmi_get_eld_data(struct hda_codec *codec, hda_nid_t nid, int byte_index) { unsigned int val; val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_ELDD, byte_index); - #ifdef BE_PARANOID printk(KERN_INFO "HDMI: ELD data byte %d: 0x%x\n", byte_index, val); #endif - - if ((val & AC_ELDD_ELD_VALID) == 0) { - snd_printd(KERN_INFO "HDMI: invalid ELD data byte %d\n", - byte_index); - val = 0; - } - - return val & AC_ELDD_ELD_DATA; + return val; } #define GRAB_BITS(buf, byte, lowbit, bits) \ @@ -344,11 +336,26 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld, if (!buf) return -ENOMEM; - for (i = 0; i < size; i++) - buf[i] = hdmi_get_eld_byte(codec, nid, i); + for (i = 0; i < size; i++) { + unsigned int val = hdmi_get_eld_data(codec, nid, i); + if (!(val & AC_ELDD_ELD_VALID)) { + if (!i) { + snd_printd(KERN_INFO + "HDMI: invalid ELD data\n"); + ret = -EINVAL; + goto error; + } + snd_printd(KERN_INFO + "HDMI: invalid ELD data byte %d\n", i); + val = 0; + } else + val &= AC_ELDD_ELD_DATA; + buf[i] = val; + } ret = hdmi_update_eld(eld, buf, size); +error: kfree(buf); return ret; } -- cgit v0.10.2 From 1b004d03d8670bdd871e0f297ed20bc510e404de Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sat, 20 Aug 2011 09:19:59 +0200 Subject: ALSA: hda - Fix error check from snd_hda_get_conn_index() in patch_cirrus.c snd_hda_get_conn_index() returns a negative value while the current code stores it in an unsigned int. It must be stored in a signed integer. Reported-by: Jesper Juhl Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 47d6ffc..d6c93d9 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -375,7 +375,7 @@ static int is_ext_mic(struct hda_codec *codec, unsigned int idx) static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin, unsigned int *idxp) { - int i; + int i, idx; hda_nid_t nid; nid = codec->start_nid; @@ -384,9 +384,11 @@ static hda_nid_t get_adc(struct hda_codec *codec, hda_nid_t pin, type = get_wcaps_type(get_wcaps(codec, nid)); if (type != AC_WID_AUD_IN) continue; - *idxp = snd_hda_get_conn_index(codec, nid, pin, false); - if (*idxp >= 0) + idx = snd_hda_get_conn_index(codec, nid, pin, false); + if (idx >= 0) { + *idxp = idx; return nid; + } } return 0; } -- cgit v0.10.2 From de75577c8c3ab733f808c65e1a9d55882efde68e Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 20 Aug 2011 08:12:41 +0200 Subject: ALSA: sound/aoa/fabrics/layout.c: remove unneeded kfree The label outnodev is only used when kzalloc has not yet taken place or has failed, so there is no need for the call for kfree under this label. 
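To make the control-flow bug concrete before the semantic patch and diff below (a standalone toy, not the driver code: probe_demo() and its arguments are invented, and plain malloc()/free() stand in for kzalloc()/kfree()):

#include <stdlib.h>
#include <errno.h>

struct layout_dev { int id; };

static int probe_demo(int have_sound_node, int late_failure)
{
        struct layout_dev *ldev;

        if (!have_sound_node)
                goto outnodev;  /* ldev not allocated yet: freeing here would be wrong */

        ldev = malloc(sizeof(*ldev));
        if (!ldev)
                goto outnodev;  /* allocation failed: nothing to free either */

        if (late_failure) {
                free(ldev);     /* only this path owns a live allocation */
                goto outnodev;
        }

        free(ldev);             /* toy only; a real probe would keep ldev */
        return 0;

outnodev:
        return -ENODEV;
}

Because the shared label is reachable on paths where the allocation never happened (or failed), the kfree() belongs on the one failure path that follows a successful kzalloc(), which is exactly where the patch moves it.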
A simplified version of the semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @@ identifier x; expression E1!=0,E2,E3,E4; statement S; iterator I; @@ ( if (...) { ... when != kfree(x) when != x = E3 when != E3 = x * return ...; } ... when != x = E2 when != I(...,x,...) S if (...) { ... when != x = E4 kfree(x); ... return ...; } ) // Signed-off-by: Julia Lawall Signed-off-by: Takashi Iwai diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c index 3fd1a7e..552b97a 100644 --- a/sound/aoa/fabrics/layout.c +++ b/sound/aoa/fabrics/layout.c @@ -1073,10 +1073,10 @@ static int aoa_fabric_layout_probe(struct soundbus_dev *sdev) sdev->pcmid = -1; list_del(&ldev->list); layouts_list_items--; + kfree(ldev); outnodev: of_node_put(sound); layout_device = NULL; - kfree(ldev); return -ENODEV; } -- cgit v0.10.2 From fbe5e29ec1886967255e76946aaf537b8cc9b81e Mon Sep 17 00:00:00 2001 From: Daniel Schwierzeck Date: Fri, 19 Aug 2011 12:04:20 +0000 Subject: atm: br2684: Fix oops due to skb->dev being NULL This oops have been already fixed with commit 27141666b69f535a4d63d7bc6d9e84ee5032f82a atm: [br2684] Fix oops due to skb->dev being NULL It happens that if a packet arrives in a VC between the call to open it on the hardware and the call to change the backend to br2684, br2684_regvcc processes the packet and oopses dereferencing skb->dev because it is NULL before the call to br2684_push(). but have been introduced again with commit b6211ae7f2e56837c6a4849316396d1535606e90 atm: Use SKB queue and list helpers instead of doing it by-hand. Signed-off-by: Daniel Schwierzeck Signed-off-by: David S. Miller diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 52cfd0c..d07223c 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -558,12 +558,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) spin_unlock_irqrestore(&rq->lock, flags); skb_queue_walk_safe(&queue, skb, tmp) { - struct net_device *dev = skb->dev; + struct net_device *dev; + + br2684_push(atmvcc, skb); + dev = skb->dev; dev->stats.rx_bytes -= skb->len; dev->stats.rx_packets--; - - br2684_push(atmvcc, skb); } /* initialize netdev carrier state */ -- cgit v0.10.2 From d70d43d7d719ab709af7df109e706e804fe21834 Mon Sep 17 00:00:00 2001 From: Jiejing Zhang Date: Sat, 20 Aug 2011 14:38:01 -0700 Subject: Input: max11801_ts - correct license statement The original license statement was confusing since it was unclear if the license was pure GPLv2 or GPLv2+ and did not match the license of the driver max11801_ts was derived from. The license is GPLv2+. Signed-off-by: Jiejing Zhang Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c index 4f2713d..4627fe5 100644 --- a/drivers/input/touchscreen/max11801_ts.c +++ b/drivers/input/touchscreen/max11801_ts.c @@ -9,7 +9,8 @@ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License. + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. */ /* -- cgit v0.10.2 From 5598473a5b40c47a8c5349dd2c2630797169cf1a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 20 Aug 2011 17:14:54 -0700 Subject: sparc: Allow handling signals when stack is corrupted. 
If we can't push the pending register windows onto the user's stack, we disallow signal delivery even if the signal would be delivered on a valid seperate signal stack. Add a register window save area in the signal frame, and store any unsavable windows there. On sigreturn, if any windows are still queued up in the signal frame, try to push them back onto the stack and if that fails we kill the process immediately. This allows the debug/tst-longjmp_chk2 glibc test case to pass. Signed-off-by: David S. Miller diff --git a/arch/sparc/include/asm/sigcontext.h b/arch/sparc/include/asm/sigcontext.h index a1607d1..69914d7 100644 --- a/arch/sparc/include/asm/sigcontext.h +++ b/arch/sparc/include/asm/sigcontext.h @@ -45,6 +45,19 @@ typedef struct { int si_mask; } __siginfo32_t; +#define __SIGC_MAXWIN 7 + +typedef struct { + unsigned long locals[8]; + unsigned long ins[8]; +} __siginfo_reg_window; + +typedef struct { + int wsaved; + __siginfo_reg_window reg_window[__SIGC_MAXWIN]; + unsigned long rwbuf_stkptrs[__SIGC_MAXWIN]; +} __siginfo_rwin_t; + #ifdef CONFIG_SPARC64 typedef struct { unsigned int si_float_regs [64]; @@ -73,6 +86,7 @@ struct sigcontext { unsigned long ss_size; } sigc_stack; unsigned long sigc_mask; + __siginfo_rwin_t * sigc_rwin_save; }; #else diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index b90b4a1..cb85458 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o obj-y += process_$(BITS).o obj-y += signal_$(BITS).o +obj-y += sigutil_$(BITS).o obj-$(CONFIG_SPARC32) += ioport.o obj-y += setup_$(BITS).o obj-y += idprom.o diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 75fad42..1ba95af 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -29,6 +29,8 @@ #include #include +#include "sigutil.h" + #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) /* This magic should be in g_upper[0] for all upper parts @@ -44,14 +46,14 @@ typedef struct { struct signal_frame32 { struct sparc_stackf32 ss; __siginfo32_t info; - /* __siginfo_fpu32_t * */ u32 fpu_save; + /* __siginfo_fpu_t * */ u32 fpu_save; unsigned int insns[2]; unsigned int extramask[_COMPAT_NSIG_WORDS - 1]; unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ siginfo_extra_v8plus_t v8plus; - __siginfo_fpu_t fpu_state; -}; + /* __siginfo_rwin_t * */u32 rwin_save; +} __attribute__((aligned(8))); typedef struct compat_siginfo{ int si_signo; @@ -110,18 +112,14 @@ struct rt_signal_frame32 { compat_siginfo_t info; struct pt_regs32 regs; compat_sigset_t mask; - /* __siginfo_fpu32_t * */ u32 fpu_save; + /* __siginfo_fpu_t * */ u32 fpu_save; unsigned int insns[2]; stack_t32 stack; unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ siginfo_extra_v8plus_t v8plus; - __siginfo_fpu_t fpu_state; -}; - -/* Align macros */ -#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15))) -#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15))) + /* __siginfo_rwin_t * */u32 rwin_save; +} __attribute__((aligned(8))); int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) { @@ -192,30 +190,13 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return 0; } -static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t 
__user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err; - - err = __get_user(fprs, &fpu->si_fprs); - fprs_write(0); - regs->tstate &= ~TSTATE_PEF; - if (fprs & FPRS_DL) - err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32)); - err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); - current_thread_info()->fpsaved[0] |= fprs; - return err; -} - void do_sigreturn32(struct pt_regs *regs) { struct signal_frame32 __user *sf; + compat_uptr_t fpu_save; + compat_uptr_t rwin_save; unsigned int psr; - unsigned pc, npc, fpu_save; + unsigned pc, npc; sigset_t set; unsigned seta[_COMPAT_NSIG_WORDS]; int err, i; @@ -273,8 +254,13 @@ void do_sigreturn32(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) - err |= restore_fpu_state32(regs, &sf->fpu_state); + if (!err && fpu_save) + err |= restore_fpu_state(regs, compat_ptr(fpu_save)); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(compat_ptr(rwin_save))) + goto segv; + } err |= __get_user(seta[0], &sf->info.si_mask); err |= copy_from_user(seta+1, &sf->extramask, (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); @@ -300,7 +286,9 @@ segv: asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) { struct rt_signal_frame32 __user *sf; - unsigned int psr, pc, npc, fpu_save, u_ss_sp; + unsigned int psr, pc, npc, u_ss_sp; + compat_uptr_t fpu_save; + compat_uptr_t rwin_save; mm_segment_t old_fs; sigset_t set; compat_sigset_t seta; @@ -359,8 +347,8 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) - err |= restore_fpu_state32(regs, &sf->fpu_state); + if (!err && fpu_save) + err |= restore_fpu_state(regs, compat_ptr(fpu_save)); err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t)); err |= __get_user(u_ss_sp, &sf->stack.ss_sp); st.ss_sp = compat_ptr(u_ss_sp); @@ -376,6 +364,12 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf); set_fs(old_fs); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(compat_ptr(rwin_save))) + goto segv; + } + switch (_NSIG_WORDS) { case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32); case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32); @@ -433,26 +427,6 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns return (void __user *) sp; } -static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err = 0; - - fprs = current_thread_info()->fpsaved[0]; - if (fprs & FPRS_DL) - err |= copy_to_user(&fpu->si_float_regs[0], fpregs, - (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, - (sizeof(unsigned int) * 32)); - err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); - err |= __put_user(fprs, &fpu->si_fprs); - - return err; -} - /* The I-cache flush instruction only works in the primary ASI, which * right now is the nucleus, aka. kernel space. 
* @@ -515,18 +489,23 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, int signo, sigset_t *oldset) { struct signal_frame32 __user *sf; + int i, err, wsaved; + void __user *tail; int sigframe_size; u32 psr; - int i, err; unsigned int seta[_COMPAT_NSIG_WORDS]; /* 1. Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); - sigframe_size = SF_ALIGNEDSZ; - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = get_thread_wsaved(); + + sigframe_size = sizeof(*sf); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct signal_frame32 __user *) get_sigframe(&ka->sa, regs, sigframe_size); @@ -534,8 +513,7 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, if (invalid_frame_pointer(sf, sigframe_size)) goto sigill; - if (get_thread_wsaved() != 0) - goto sigill; + tail = (sf + 1); /* 2. Save the current process state */ if (test_thread_flag(TIF_32BIT)) { @@ -560,11 +538,22 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, &sf->v8plus.asi); if (psr & PSR_EF) { - err |= save_fpu_state32(regs, &sf->fpu_state); - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user((u64)fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user((u64)rwp, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); + } switch (_NSIG_WORDS) { case 4: seta[7] = (oldset->sig[3] >> 32); @@ -580,10 +569,21 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, err |= __copy_to_user(sf->extramask, seta + 1, (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); - err |= copy_in_user((u32 __user *)sf, - (u32 __user *)(regs->u_regs[UREG_FP]), - sizeof(struct reg_window32)); - + if (!wsaved) { + err |= copy_in_user((u32 __user *)sf, + (u32 __user *)(regs->u_regs[UREG_FP]), + sizeof(struct reg_window32)); + } else { + struct reg_window *rp; + + rp = &current_thread_info()->reg_window[wsaved - 1]; + for (i = 0; i < 8; i++) + err |= __put_user(rp->locals[i], &sf->ss.locals[i]); + for (i = 0; i < 6; i++) + err |= __put_user(rp->ins[i], &sf->ss.ins[i]); + err |= __put_user(rp->ins[6], &sf->ss.fp); + err |= __put_user(rp->ins[7], &sf->ss.callers_pc); + } if (err) goto sigsegv; @@ -613,7 +613,6 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/ if (err) goto sigsegv; - flush_signal_insns(address); } return 0; @@ -632,18 +631,23 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, siginfo_t *info) { struct rt_signal_frame32 __user *sf; + int i, err, wsaved; + void __user *tail; int sigframe_size; u32 psr; - int i, err; compat_sigset_t seta; /* 1.
Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); - sigframe_size = RT_ALIGNEDSZ; - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = get_thread_wsaved(); + + sigframe_size = sizeof(*sf); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame32 __user *) get_sigframe(&ka->sa, regs, sigframe_size); @@ -651,8 +655,7 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, if (invalid_frame_pointer(sf, sigframe_size)) goto sigill; - if (get_thread_wsaved() != 0) - goto sigill; + tail = (sf + 1); /* 2. Save the current process state */ if (test_thread_flag(TIF_32BIT)) { @@ -677,11 +680,22 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, &sf->v8plus.asi); if (psr & PSR_EF) { - err |= save_fpu_state32(regs, &sf->fpu_state); - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user((u64)fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user((u64)rwp, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); + } /* Update the siginfo structure. */ err |= copy_siginfo_to_user32(&sf->info, info); @@ -703,9 +717,21 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, } err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t)); - err |= copy_in_user((u32 __user *)sf, - (u32 __user *)(regs->u_regs[UREG_FP]), - sizeof(struct reg_window32)); + if (!wsaved) { + err |= copy_in_user((u32 __user *)sf, + (u32 __user *)(regs->u_regs[UREG_FP]), + sizeof(struct reg_window32)); + } else { + struct reg_window *rp; + + rp = &current_thread_info()->reg_window[wsaved - 1]; + for (i = 0; i < 8; i++) + err |= __put_user(rp->locals[i], &sf->ss.locals[i]); + for (i = 0; i < 6; i++) + err |= __put_user(rp->ins[i], &sf->ss.ins[i]); + err |= __put_user(rp->ins[6], &sf->ss.fp); + err |= __put_user(rp->ins[7], &sf->ss.callers_pc); + } if (err) goto sigsegv; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 5e5c5fd..04ede8f 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -26,6 +26,8 @@ #include #include /* flush_sig_insns */ +#include "sigutil.h" + #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) extern void fpsave(unsigned long *fpregs, unsigned long *fsr, @@ -39,8 +41,8 @@ struct signal_frame { unsigned long insns[2] __attribute__ ((aligned (8))); unsigned int extramask[_NSIG_WORDS - 1]; unsigned int extra_size; /* Should be 0 */ - __siginfo_fpu_t fpu_state; -}; + __siginfo_rwin_t __user *rwin_save; +} __attribute__((aligned(8))); struct rt_signal_frame { struct sparc_stackf ss; @@ -51,8 +53,8 @@ struct rt_signal_frame { unsigned int insns[2]; stack_t stack; unsigned int extra_size; /* Should be 0 */ - __siginfo_fpu_t fpu_state; -}; + __siginfo_rwin_t __user *rwin_save; +} __attribute__((aligned(8))); /* Align macros */ #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) @@ -79,43 +81,13 @@ asmlinkage int sys_sigsuspend(old_sigset_t set) return _sigpause_common(set); } -static inline int -restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
-{ - int err; -#ifdef CONFIG_SMP - if (test_tsk_thread_flag(current, TIF_USEDFPU)) - regs->psr &= ~PSR_EF; -#else - if (current == last_task_used_math) { - last_task_used_math = NULL; - regs->psr &= ~PSR_EF; - } -#endif - set_used_math(); - clear_tsk_thread_flag(current, TIF_USEDFPU); - - if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) - return -EFAULT; - - err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], - (sizeof(unsigned long) * 32)); - err |= __get_user(current->thread.fsr, &fpu->si_fsr); - err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); - if (current->thread.fpqdepth != 0) - err |= __copy_from_user(&current->thread.fpqueue[0], - &fpu->si_fpqueue[0], - ((sizeof(unsigned long) + - (sizeof(unsigned long *)))*16)); - return err; -} - asmlinkage void do_sigreturn(struct pt_regs *regs) { struct signal_frame __user *sf; unsigned long up_psr, pc, npc; sigset_t set; __siginfo_fpu_t __user *fpu_save; + __siginfo_rwin_t __user *rwin_save; int err; /* Always make any pending restarted system calls return -EINTR */ @@ -150,9 +122,11 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) err |= restore_fpu_state(regs, fpu_save); + err |= __get_user(rwin_save, &sf->rwin_save); + if (rwin_save) + err |= restore_rwin_state(rwin_save); /* This is pretty much atomic, no amount locking would prevent * the races which exist anyways. @@ -180,6 +154,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) struct rt_signal_frame __user *sf; unsigned int psr, pc, npc; __siginfo_fpu_t __user *fpu_save; + __siginfo_rwin_t __user *rwin_save; mm_segment_t old_fs; sigset_t set; stack_t st; @@ -207,8 +182,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - - if (fpu_save) + if (!err && fpu_save) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); @@ -228,6 +202,12 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf); set_fs(old_fs); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(rwin_save)) + goto segv; + } + sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); current->blocked = set; @@ -280,53 +260,23 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re return (void __user *) sp; } -static inline int -save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - int err = 0; -#ifdef CONFIG_SMP - if (test_tsk_thread_flag(current, TIF_USEDFPU)) { - put_psr(get_psr() | PSR_EF); - fpsave(&current->thread.float_regs[0], &current->thread.fsr, - &current->thread.fpqueue[0], &current->thread.fpqdepth); - regs->psr &= ~(PSR_EF); - clear_tsk_thread_flag(current, TIF_USEDFPU); - } -#else - if (current == last_task_used_math) { - put_psr(get_psr() | PSR_EF); - fpsave(&current->thread.float_regs[0], &current->thread.fsr, - &current->thread.fpqueue[0], &current->thread.fpqdepth); - last_task_used_math = NULL; - regs->psr &= ~(PSR_EF); - } -#endif - err |= __copy_to_user(&fpu->si_float_regs[0], - &current->thread.float_regs[0], - (sizeof(unsigned long) * 32)); - err |= __put_user(current->thread.fsr, &fpu->si_fsr); - err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); - if (current->thread.fpqdepth != 0) - err |= __copy_to_user(&fpu->si_fpqueue[0], - &current->thread.fpqueue[0], - ((sizeof(unsigned long) + - (sizeof(unsigned long *)))*16)); -
clear_used_math(); - return err; -} - static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, int signo, sigset_t *oldset) { struct signal_frame __user *sf; - int sigframe_size, err; + int sigframe_size, err, wsaved; + void __user *tail; /* 1. Make sure everything is clean */ synchronize_user_stack(); - sigframe_size = SF_ALIGNEDSZ; - if (!used_math()) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = current_thread_info()->w_saved; + + sigframe_size = sizeof(*sf); + if (used_math()) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct signal_frame __user *) get_sigframe(&ka->sa, regs, sigframe_size); @@ -334,8 +284,7 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, if (invalid_frame_pointer(sf, sigframe_size)) goto sigill_and_return; - if (current_thread_info()->w_saved != 0) - goto sigill_and_return; + tail = sf + 1; /* 2. Save the current process state */ err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs)); @@ -343,17 +292,34 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= __put_user(0, &sf->extra_size); if (used_math()) { - err |= save_fpu_state(regs, &sf->fpu_state); - err |= __put_user(&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user(fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user(rwp, &sf->rwin_save); + } else { + err |= __put_user(0, &sf->rwin_save); + } err |= __put_user(oldset->sig[0], &sf->info.si_mask); err |= __copy_to_user(sf->extramask, &oldset->sig[1], (_NSIG_WORDS - 1) * sizeof(unsigned int)); - err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], - sizeof(struct reg_window32)); + if (!wsaved) { + err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], + sizeof(struct reg_window32)); + } else { + struct reg_window32 *rp; + + rp = &current_thread_info()->reg_window[wsaved - 1]; + err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); + } if (err) goto sigsegv; @@ -399,21 +365,24 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signo, sigset_t *oldset, siginfo_t *info) { struct rt_signal_frame __user *sf; - int sigframe_size; + int sigframe_size, wsaved; + void __user *tail; unsigned int psr; int err; synchronize_user_stack(); - sigframe_size = RT_ALIGNEDSZ; - if (!used_math()) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = current_thread_info()->w_saved; + sigframe_size = sizeof(*sf); + if (used_math()) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame __user *) get_sigframe(&ka->sa, regs, sigframe_size); if (invalid_frame_pointer(sf, sigframe_size)) goto sigill; - if (current_thread_info()->w_saved != 0) - goto sigill; + tail = sf + 1; err = __put_user(regs->pc, &sf->regs.pc); err |= __put_user(regs->npc, &sf->regs.npc); err |= __put_user(regs->y, &sf->regs.y); @@ -425,11 +394,21 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= __put_user(0, &sf->extra_size); if (psr & PSR_EF) { - err |= save_fpu_state(regs, &sf->fpu_state); - err |= __put_user(&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user(fp, &sf->fpu_save); } else { err 
|= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user(rwp, &sf->rwin_save); + } else { + err |= __put_user(0, &sf->rwin_save); + } err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t)); /* Setup sigaltstack */ @@ -437,8 +416,15 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); - err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], - sizeof(struct reg_window32)); + if (!wsaved) { + err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], - sizeof(struct reg_window32)); + sizeof(struct reg_window32)); + } else { + struct reg_window32 *rp; + + rp = &current_thread_info()->reg_window[wsaved - 1]; + err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); + } err |= copy_siginfo_to_user(&sf->info, info); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 006fe45..47509df 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -34,6 +34,7 @@ #include "entry.h" #include "systbls.h" +#include "sigutil.h" #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) @@ -236,7 +237,7 @@ struct rt_signal_frame { __siginfo_fpu_t __user *fpu_save; stack_t stack; sigset_t mask; - __siginfo_fpu_t fpu_state; + __siginfo_rwin_t *rwin_save; }; static long _sigpause_common(old_sigset_t set) @@ -266,33 +267,12 @@ asmlinkage long sys_sigsuspend(old_sigset_t set) return _sigpause_common(set); } -static inline int -restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err; - - err = __get_user(fprs, &fpu->si_fprs); - fprs_write(0); - regs->tstate &= ~TSTATE_PEF; - if (fprs & FPRS_DL) - err |= copy_from_user(fpregs, &fpu->si_float_regs[0], - (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], - (sizeof(unsigned int) * 32)); - err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); - current_thread_info()->fpsaved[0] |= fprs; - return err; -} - void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; unsigned long tpc, tnpc, tstate; __siginfo_fpu_t __user *fpu_save; + __siginfo_rwin_t __user *rwin_save; sigset_t set; int err; @@ -325,8 +305,8 @@ void do_rt_sigreturn(struct pt_regs *regs) regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) - err |= restore_fpu_state(regs, &sf->fpu_state); + if (!err && fpu_save) + err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); @@ -334,6 +314,12 @@ void do_rt_sigreturn(struct pt_regs *regs) if (err) goto segv; + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(rwin_save)) + goto segv; + } + regs->tpc = tpc; regs->tnpc = tnpc; @@ -351,34 +337,13 @@ segv: } /* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp, int fplen) +static int invalid_frame_pointer(void __user *fp) { if (((unsigned long) fp) & 15) return 1; return 0; } -static inline int -save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = 
current_thread_info()->fpregs; - unsigned long fprs; - int err = 0; - - fprs = current_thread_info()->fpsaved[0]; - if (fprs & FPRS_DL) - err |= copy_to_user(&fpu->si_float_regs[0], fpregs, - (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, - (sizeof(unsigned int) * 32)); - err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); - err |= __put_user(fprs, &fpu->si_fprs); - - return err; -} - static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; @@ -414,34 +379,48 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, int signo, sigset_t *oldset, siginfo_t *info) { struct rt_signal_frame __user *sf; - int sigframe_size, err; + int wsaved, err, sf_size; + void __user *tail; /* 1. Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); - sigframe_size = sizeof(struct rt_signal_frame); - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = get_thread_wsaved(); + sf_size = sizeof(struct rt_signal_frame); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sf_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sf_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame __user *) - get_sigframe(ka, regs, sigframe_size); - - if (invalid_frame_pointer (sf, sigframe_size)) - goto sigill; + get_sigframe(ka, regs, sf_size); - if (get_thread_wsaved() != 0) + if (invalid_frame_pointer (sf)) goto sigill; + tail = (sf + 1); + /* 2. Save the current process state */ err = copy_to_user(&sf->regs, regs, sizeof (*regs)); if (current_thread_info()->fpsaved[0] & FPRS_FEF) { - err |= save_fpu_state(regs, &sf->fpu_state); - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fpu_save = tail; + tail += sizeof(__siginfo_fpu_t); + err |= save_fpu_state(regs, fpu_save); + err |= __put_user((u64)fpu_save, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwin_save = tail; + tail += sizeof(__siginfo_rwin_t); + err |= save_rwin_state(wsaved, rwin_save); + err |= __put_user((u64)rwin_save, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); + } /* Setup sigaltstack */ err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); @@ -450,10 +429,17 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t)); - err |= copy_in_user((u64 __user *)sf, - (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS), - sizeof(struct reg_window)); + if (!wsaved) { + err |= copy_in_user((u64 __user *)sf, + (u64 __user *)(regs->u_regs[UREG_FP] + + STACK_BIAS), + sizeof(struct reg_window)); + } else { + struct reg_window *rp; + rp = &current_thread_info()->reg_window[wsaved - 1]; + err |= copy_to_user(sf, rp, sizeof(struct reg_window)); + } if (info) err |= copy_siginfo_to_user(&sf->info, info); else { diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h new file mode 100644 index 0000000..d223aa4 --- /dev/null +++ b/arch/sparc/kernel/sigutil.h @@ -0,0 +1,9 @@ +#ifndef _SIGUTIL_H +#define _SIGUTIL_H + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin); +int 
restore_rwin_state(__siginfo_rwin_t __user *rp); + +#endif /* _SIGUTIL_H */ diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c new file mode 100644 index 0000000..35c7897 --- /dev/null +++ b/arch/sparc/kernel/sigutil_32.c @@ -0,0 +1,120 @@ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "sigutil.h" + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + int err = 0; +#ifdef CONFIG_SMP + if (test_tsk_thread_flag(current, TIF_USEDFPU)) { + put_psr(get_psr() | PSR_EF); + fpsave(&current->thread.float_regs[0], &current->thread.fsr, + &current->thread.fpqueue[0], &current->thread.fpqdepth); + regs->psr &= ~(PSR_EF); + clear_tsk_thread_flag(current, TIF_USEDFPU); + } +#else + if (current == last_task_used_math) { + put_psr(get_psr() | PSR_EF); + fpsave(&current->thread.float_regs[0], &current->thread.fsr, + &current->thread.fpqueue[0], &current->thread.fpqdepth); + last_task_used_math = NULL; + regs->psr &= ~(PSR_EF); + } +#endif + err |= __copy_to_user(&fpu->si_float_regs[0], + &current->thread.float_regs[0], + (sizeof(unsigned long) * 32)); + err |= __put_user(current->thread.fsr, &fpu->si_fsr); + err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); + if (current->thread.fpqdepth != 0) + err |= __copy_to_user(&fpu->si_fpqueue[0], + &current->thread.fpqueue[0], + ((sizeof(unsigned long) + + (sizeof(unsigned long *)))*16)); + clear_used_math(); + return err; +} + +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + int err; +#ifdef CONFIG_SMP + if (test_tsk_thread_flag(current, TIF_USEDFPU)) + regs->psr &= ~PSR_EF; +#else + if (current == last_task_used_math) { + last_task_used_math = NULL; + regs->psr &= ~PSR_EF; + } +#endif + set_used_math(); + clear_tsk_thread_flag(current, TIF_USEDFPU); + + if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) + return -EFAULT; + + err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], + (sizeof(unsigned long) * 32)); + err |= __get_user(current->thread.fsr, &fpu->si_fsr); + err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); + if (current->thread.fpqdepth != 0) + err |= __copy_from_user(&current->thread.fpqueue[0], + &fpu->si_fpqueue[0], + ((sizeof(unsigned long) + + (sizeof(unsigned long *)))*16)); + return err; +} + +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) +{ + int i, err = __put_user(wsaved, &rwin->wsaved); + + for (i = 0; i < wsaved; i++) { + struct reg_window32 *rp; + unsigned long fp; + + rp = &current_thread_info()->reg_window[i]; + fp = current_thread_info()->rwbuf_stkptrs[i]; + err |= copy_to_user(&rwin->reg_window[i], rp, + sizeof(struct reg_window32)); + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); + } + return err; +} + +int restore_rwin_state(__siginfo_rwin_t __user *rp) +{ + struct thread_info *t = current_thread_info(); + int i, wsaved, err; + + __get_user(wsaved, &rp->wsaved); + if (wsaved > NSWINS) + return -EFAULT; + + err = 0; + for (i = 0; i < wsaved; i++) { + err |= copy_from_user(&t->reg_window[i], + &rp->reg_window[i], + sizeof(struct reg_window32)); + err |= __get_user(t->rwbuf_stkptrs[i], + &rp->rwbuf_stkptrs[i]); + } + if (err) + return err; + + t->w_saved = wsaved; + synchronize_user_stack(); + if (t->w_saved) + return -EFAULT; + return 0; + +} diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c new file mode 100644 index 0000000..e7dc508 --- /dev/null +++ b/arch/sparc/kernel/sigutil_64.c @@ -0,0 +1,93 @@ +#include +#include +#include +#include + +#include +#include +#include + +#include "sigutil.h" 
+ +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + int err = 0; + + fprs = current_thread_info()->fpsaved[0]; + if (fprs & FPRS_DL) + err |= copy_to_user(&fpu->si_float_regs[0], fpregs, + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, + (sizeof(unsigned int) * 32)); + err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); + err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); + err |= __put_user(fprs, &fpu->si_fprs); + + return err; +} + +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + int err; + + err = __get_user(fprs, &fpu->si_fprs); + fprs_write(0); + regs->tstate &= ~TSTATE_PEF; + if (fprs & FPRS_DL) + err |= copy_from_user(fpregs, &fpu->si_float_regs[0], + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], + (sizeof(unsigned int) * 32)); + err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); + err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); + current_thread_info()->fpsaved[0] |= fprs; + return err; +} + +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) +{ + int i, err = __put_user(wsaved, &rwin->wsaved); + + for (i = 0; i < wsaved; i++) { + struct reg_window *rp = &current_thread_info()->reg_window[i]; + unsigned long fp = current_thread_info()->rwbuf_stkptrs[i]; + + err |= copy_to_user(&rwin->reg_window[i], rp, + sizeof(struct reg_window)); + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); + } + return err; +} + +int restore_rwin_state(__siginfo_rwin_t __user *rp) +{ + struct thread_info *t = current_thread_info(); + int i, wsaved, err; + + __get_user(wsaved, &rp->wsaved); + if (wsaved > NSWINS) + return -EFAULT; + + err = 0; + for (i = 0; i < wsaved; i++) { + err |= copy_from_user(&t->reg_window[i], + &rp->reg_window[i], + sizeof(struct reg_window)); + err |= __get_user(t->rwbuf_stkptrs[i], + &rp->rwbuf_stkptrs[i]); + } + if (err) + return err; + + set_thread_wsaved(wsaved); + synchronize_user_stack(); + if (get_thread_wsaved()) + return -EFAULT; + return 0; +} -- cgit v0.10.2 From 47c08f3107270e5a439bc0106a308f7c48c9621d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 20 Aug 2011 11:49:43 -0700 Subject: pci: fix new kernel-doc warning in pci.c Fix new kernel-doc warning in pci.c: Warning(drivers/pci/pci.c:3259): No description found for parameter 'mps' Warning(drivers/pci/pci.c:3259): Excess function parameter 'rq' description in 'pcie_set_mps' Signed-off-by: Randy Dunlap Cc: Jesse Barnes Signed-off-by: Linus Torvalds diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 466fad6..0ce6742 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3250,7 +3250,7 @@ int pcie_get_mps(struct pci_dev *dev) /** * pcie_set_mps - set PCI Express maximum payload size * @dev: PCI device to query - * @rq: maximum payload size in bytes + * @mps: maximum payload size in bytes * valid values are 128, 256, 512, 1024, 2048, 4096 * * If possible sets maximum payload size -- cgit v0.10.2 From 6719db6a23d4b7f1e5052eedae394135e3aef9c1 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Sat, 20 Aug 2011 08:29:51 -0400 Subject: Btrfs: fix 64 bit divide problem This fixes a regression introduced by commit cdcb725c05fe ("Btrfs: check if there is enough space for balancing smarter"). 
We can't do 64-bit divides on 32-bit architectures. In cases where we need to divide/multiply by 2, we should just left/right shift, respectively, and in cases where there are N devices, use do_div. Also make the counters u64 to match up with rw_devices. Thanks, Signed-off-by: Josef Bacik Acked-and-tested-by: Ingo Molnar Signed-off-by: Linus Torvalds diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 80d6148..f5be06a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6735,9 +6735,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; struct btrfs_device *device; u64 min_free; + u64 dev_min = 1; + u64 dev_nr = 0; int index; - int dev_nr = 0; - int dev_min = 1; int full = 0; int ret = 0; @@ -6796,14 +6796,16 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) index = get_block_group_index(block_group); if (index == 0) { dev_min = 4; - min_free /= 2; + /* Divide by 2 */ + min_free >>= 1; } else if (index == 1) { dev_min = 2; } else if (index == 2) { - min_free *= 2; + /* Multiply by 2 */ + min_free <<= 1; } else if (index == 3) { dev_min = fs_devices->rw_devices; - min_free /= dev_min; + do_div(min_free, dev_min); } mutex_lock(&root->fs_info->chunk_mutex); -- cgit v0.10.2 From 2782a35132339574b06ce30556eb9f97eb1d26cd Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 21 Aug 2011 12:48:04 -0700 Subject: Input: tnetv107x-ts - add missing include of linux/module.h tnetv107x-ts.c uses interfaces from linux/module.h, so it should include that file. This patch fixes build errors. Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c index 089b0a0..0e8f63e 100644 --- a/drivers/input/touchscreen/tnetv107x-ts.c +++ b/drivers/input/touchscreen/tnetv107x-ts.c @@ -13,6 +13,7 @@ * GNU General Public License for more details. */ +#include #include #include #include -- cgit v0.10.2 From b9cc510b395543cb7dba89c76421d23ed9e85f95 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Sun, 21 Aug 2011 12:48:08 -0700 Subject: Input: ep93xx_keypad - add missing include of linux/module.h ep93xx_keypad.c uses interfaces from linux/module.h, so it should include that file. This patch fixes build errors. Signed-off-by: Axel Lin Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index c8242dd..aa17e02 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -20,6 +20,7 @@ * flag. */ +#include #include #include #include -- cgit v0.10.2 From ffb57c4b8612c31204b06713770f6df4b8a94e4f Mon Sep 17 00:00:00 2001 From: Jay Estabrook Date: Wed, 6 Jul 2011 23:57:13 +0000 Subject: drm/radeon/alpha: Add Alpha support to Radeon DRM code Alpha needs to have available the system bus address for the Radeon's local memory, so that it can be used in ttm_bo_vm_fault(), when building the PTEs for accessing that VRAM. So, we make bus.addr hold the ioremap() return, and then we can modify bus.base appropriately for use during page fault processing. 
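As a rough sketch of that idea (illustrative only; the field and hose names are taken from the hunk below, this is not the literal driver code):

    /* keep our own kernel mapping of the aperture in bus.addr ... */
    mem->bus.addr = ioremap_wc(mem->bus.base + mem->bus.offset,
                               mem->bus.size);
    /* ... so bus.base can be rebased to the CPU-visible system bus
     * address that ttm_bo_vm_fault() needs for building PTEs */
    mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
                    rdev->ddev->hose->dense_mem_base;
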
Signed-off-by: Jay Estabrook Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 60125dd..9b86fb0 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -450,6 +450,29 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ return -EINVAL; mem->bus.base = rdev->mc.aper_base; mem->bus.is_iomem = true; +#ifdef __alpha__ + /* + * Alpha: use bus.addr to hold the ioremap() return, + * so we can modify bus.base below. + */ + if (mem->placement & TTM_PL_FLAG_WC) + mem->bus.addr = + ioremap_wc(mem->bus.base + mem->bus.offset, + mem->bus.size); + else + mem->bus.addr = + ioremap_nocache(mem->bus.base + mem->bus.offset, + mem->bus.size); + + /* + * Alpha: Use just the bus offset plus + * the hose/domain memory base for bus.base. + * It then can be used to build PTEs for VRAM + * access, as done in ttm_bo_vm_fault(). + */ + mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + + rdev->ddev->hose->dense_mem_base; +#endif break; default: return -EINVAL; -- cgit v0.10.2 From 24cae9e7c9537fd6a16bc2f5ec398ee4bef5d007 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 19 Aug 2011 15:24:16 +0000 Subject: drm/radeon: Take IH ring into account for test size calculation. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index dee4a0c..1ebd0fe 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -40,10 +40,14 @@ void radeon_test_moves(struct radeon_device *rdev) size = 1024 * 1024; /* Number of tests = - * (Total GTT - IB pool - writeback page - ring buffer) / test size + * (Total GTT - IB pool - writeback page - ring buffers) / test size */ - n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - - rdev->cp.ring_size)) / size; + n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size; + if (rdev->wb.wb_obj) + n -= RADEON_GPU_PAGE_SIZE; + if (rdev->ih.ring_obj) + n -= rdev->ih.ring_size; + n /= size; gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); if (!gtt_obj) { -- cgit v0.10.2 From 4fb1a35c0185f8fa3e71b12de62b8752a9a9ed0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 19 Aug 2011 15:24:17 +0000 Subject: drm/radeon: Explicitly print GTT/VRAM offsets on test failure. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise these would need to be painstakingly calculated looking at the source code. 
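For reference, the printed offset is recovered with pointer arithmetic of roughly this shape (a sketch reusing the names from the hunk below, not new driver code):

    /* byte offset of the failing word within the GTT aperture */
    unsigned long long off = (gtt_addr - rdev->mc.gtt_start) +
                             ((void *)gtt_start - gtt_map);
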
Signed-off-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 1ebd0fe..602fa35 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -136,9 +136,15 @@ void radeon_test_moves(struct radeon_device *rdev) gtt_start++, vram_start++) { if (*vram_start != gtt_start) { DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, " - "expected 0x%p (GTT map 0x%p-0x%p)\n", - i, *vram_start, gtt_start, gtt_map, - gtt_end); + "expected 0x%p (GTT/VRAM offset " + "0x%16llx/0x%16llx)\n", + i, *vram_start, gtt_start, + (unsigned long long) + (gtt_addr - rdev->mc.gtt_start + + (void*)gtt_start - gtt_map), + (unsigned long long) + (vram_addr - rdev->mc.vram_start + + (void*)gtt_start - gtt_map)); radeon_bo_kunmap(vram_obj); goto out_cleanup; } @@ -179,9 +185,15 @@ void radeon_test_moves(struct radeon_device *rdev) gtt_start++, vram_start++) { if (*gtt_start != vram_start) { DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, " - "expected 0x%p (VRAM map 0x%p-0x%p)\n", - i, *gtt_start, vram_start, vram_map, - vram_end); + "expected 0x%p (VRAM/GTT offset " + "0x%16llx/0x%16llx)\n", + i, *gtt_start, vram_start, + (unsigned long long) + (vram_addr - rdev->mc.vram_start + + (void*)vram_start - vram_map), + (unsigned long long) + (gtt_addr - rdev->mc.gtt_start + + (void*)vram_start - vram_map)); radeon_bo_kunmap(gtt_obj[i]); goto out_cleanup; } -- cgit v0.10.2 From ba95c45a78d57ac05bf45d81b92a6ec4d299695d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Fri, 19 Aug 2011 15:24:18 +0000 Subject: drm/radeon: Make vramlimit parameter actually work. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Michel Dänzer Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index a3b011b..b51e157 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -301,6 +301,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size) + mc->real_vram_size = radeon_vram_limit; dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); -- cgit v0.10.2 From 3c05c4bed4ccce3f22f6d7899b308faae24ad198 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Wed, 17 Aug 2011 15:15:00 +0200 Subject: xen: Do not enable PV IPIs when vector callback not present Fix regression for HVM case on older (<4.1.1) hypervisors caused by commit 99bbb3a84a99cd04ab16b998b20f01a72cfa9f4f Author: Stefano Stabellini Date: Thu Dec 2 17:55:10 2010 +0000 xen: PV on HVM: support PV spinlocks and IPIs This change replaced the SMP operations with event based handlers without taking into account that this only works when the hypervisor supports callback vectors. This causes unexplainable hangs early on boot for HVM guests with more than one CPU. 
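The shape of the fix, shown in full in the diff below, is to bail out of the HVM SMP setup before any PV operations are registered (sketch only):

    void __init xen_hvm_smp_init(void)
    {
        if (!xen_have_vector_callback)
            return;    /* keep the native smp_ops / IPI path */
        /* ... install the xen_hvm_* smp_ops ... */
    }
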
BugLink: http://bugs.launchpad.net/bugs/791850 CC: stable@kernel.org Signed-off-by: Stefan Bader Signed-off-by: Stefano Stabellini Tested-and-Reported-by: Stefan Bader Signed-off-by: Konrad Rzeszutek Wilk diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index b4533a8..e79dbb9 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -521,8 +521,6 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) native_smp_prepare_cpus(max_cpus); WARN_ON(xen_smp_intr_init(0)); - if (!xen_have_vector_callback) - return; xen_init_lock_cpu(0); xen_init_spinlocks(); } @@ -546,6 +544,8 @@ static void xen_hvm_cpu_die(unsigned int cpu) void __init xen_hvm_smp_init(void) { + if (!xen_have_vector_callback) + return; smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; smp_ops.smp_send_reschedule = xen_smp_send_reschedule; smp_ops.cpu_up = xen_hvm_cpu_up; -- cgit v0.10.2 From 60c5f08e154fd235056645e050f2cd5671b19125 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Thu, 11 Aug 2011 13:17:20 -0700 Subject: xen/tracing: Fix tracing config option properly Steven Rostedt says we should use CONFIG_EVENT_TRACING. Cc:Steven Rostedt Signed-off-by: Jeremy Fitzhardinge Signed-off-by: Konrad Rzeszutek Wilk diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 3326204..add2c2d 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -15,7 +15,7 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ grant-table.o suspend.o platform-pci-unplug.o \ p2m.o -obj-$(CONFIG_FTRACE) += trace.o +obj-$(CONFIG_EVENT_TRACING) += trace.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o -- cgit v0.10.2 From 5b9063b19caaffe7135e1f9b8b22174ded0f586b Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Sun, 21 Aug 2011 21:04:12 -0700 Subject: Input: ad714xx-spi - force SPI bus into the default 8-bit mode Signed-off-by: Michael Hennerich Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 4120dd5..da83ac9 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c @@ -54,6 +54,12 @@ static int ad714x_spi_write(struct device *dev, unsigned short reg, static int __devinit ad714x_spi_probe(struct spi_device *spi) { struct ad714x_chip *chip; + int err; + + spi->bits_per_word = 8; + err = spi_setup(spi); + if (err < 0) + return err; chip = ad714x_probe(&spi->dev, BUS_SPI, spi->irq, ad714x_spi_read, ad714x_spi_write); -- cgit v0.10.2 From 6337de2204be3b7b40825a1d30de30e514e8947b Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Sun, 21 Aug 2011 21:04:12 -0700 Subject: Input: ad714x - fix endianness issues Allow driver to be used on Big Endian boxes. 
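The device transfers 16-bit words MSB-first, so buffers must be built with explicit byte-order helpers rather than by casting native shorts. A minimal sketch (the helper name is hypothetical, not driver API):

    /* hypothetical helper: endian-neutral on LE and BE hosts */
    static void pack_be16_pair(__be16 buf[2], u16 reg, u16 data)
    {
        buf[0] = cpu_to_be16(reg);   /* MSB first on the wire */
        buf[1] = cpu_to_be16(data);
    }
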
Signed-off-by: Michael Hennerich Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c index e21deb1..00a6a22 100644 --- a/drivers/input/misc/ad714x-i2c.c +++ b/drivers/input/misc/ad714x-i2c.c @@ -32,17 +32,12 @@ static int ad714x_i2c_write(struct device *dev, unsigned short reg, { struct i2c_client *client = to_i2c_client(dev); int ret = 0; - u8 *_reg = (u8 *)&reg; - u8 *_data = (u8 *)&data; - - u8 tx[4] = { - _reg[1], - _reg[0], - _data[1], - _data[0] + unsigned short tx[2] = { + cpu_to_be16(reg), + cpu_to_be16(data) }; - ret = i2c_master_send(client, tx, 4); + ret = i2c_master_send(client, (u8 *)tx, 4); if (ret < 0) dev_err(&client->dev, "I2C write error\n"); @@ -54,25 +49,16 @@ static int ad714x_i2c_read(struct device *dev, unsigned short reg, { struct i2c_client *client = to_i2c_client(dev); int ret = 0; - u8 *_reg = (u8 *)&reg; - u8 *_data = (u8 *)data; + unsigned short tx = cpu_to_be16(reg); - u8 tx[2] = { - _reg[1], - _reg[0] - }; - u8 rx[2]; - - ret = i2c_master_send(client, tx, 2); + ret = i2c_master_send(client, (u8 *)&tx, 2); if (ret >= 0) - ret = i2c_master_recv(client, rx, 2); + ret = i2c_master_recv(client, (u8 *)data, 2); - if (unlikely(ret < 0)) { + if (unlikely(ret < 0)) dev_err(&client->dev, "I2C read error\n"); - } else { - _data[0] = rx[1]; - _data[1] = rx[0]; - } + else + *data = be16_to_cpu(*data); return ret; } diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index da83ac9..0c7f948 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c @@ -6,7 +6,7 @@ * Licensed under the GPL-2 or later. */ -#include /* BUS_I2C */ +#include /* BUS_SPI */ #include #include #include @@ -30,22 +30,28 @@ static int ad714x_spi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); -static int ad714x_spi_read(struct device *dev, unsigned short reg, - unsigned short *data) +static int ad714x_spi_read(struct device *dev, + unsigned short reg, unsigned short *data) { struct spi_device *spi = to_spi_device(dev); - unsigned short tx = AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg; + unsigned short tx = cpu_to_be16(AD714x_SPI_CMD_PREFIX | + AD714x_SPI_READ | reg); + int ret; - return spi_write_then_read(spi, (u8 *)&tx, 2, (u8 *)data, 2); + ret = spi_write_then_read(spi, &tx, 2, data, 2); + + *data = be16_to_cpup(data); + + return ret; } -static int ad714x_spi_write(struct device *dev, unsigned short reg, - unsigned short data) +static int ad714x_spi_write(struct device *dev, + unsigned short reg, unsigned short data) { struct spi_device *spi = to_spi_device(dev); unsigned short tx[2] = { - AD714x_SPI_CMD_PREFIX | reg, - data + cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg), + cpu_to_be16(data) }; return spi_write(spi, (u8 *)tx, 4); -- cgit v0.10.2 From c0409feb86893f5ccf73964c7b2b47ca64bdb014 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Mon, 22 Aug 2011 09:45:39 -0700 Subject: Input: ad714x - use DMA-safe buffers for spi_write() spi_write() requires use of DMA-safe (cacheline aligned) buffers. Also use the same buffers when reading data to avoid extra locking and potential memory allocation in spi_write_then_read(). 
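A minimal sketch of the technique, assuming a driver-private structure (names hypothetical): the transfer buffer gets its own cacheline so a DMA transfer never shares a line with unrelated fields:

    struct example_chip {                 /* hypothetical driver state */
        struct mutex mutex;
        /* ... other fields ... */
        __be16 xfer_buf[16] ____cacheline_aligned;  /* DMA-safe */
    };
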
Acked-by: Michael Hennerich Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c index 00a6a22..6c61218 100644 --- a/drivers/input/misc/ad714x-i2c.c +++ b/drivers/input/misc/ad714x-i2c.c @@ -27,40 +27,46 @@ static int ad714x_i2c_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ad714x_i2c_pm, ad714x_i2c_suspend, ad714x_i2c_resume); -static int ad714x_i2c_write(struct device *dev, unsigned short reg, - unsigned short data) +static int ad714x_i2c_write(struct ad714x_chip *chip, + unsigned short reg, unsigned short data) { - struct i2c_client *client = to_i2c_client(dev); - int ret = 0; - unsigned short tx[2] = { - cpu_to_be16(reg), - cpu_to_be16(data) - }; - - ret = i2c_master_send(client, (u8 *)tx, 4); - if (ret < 0) - dev_err(&client->dev, "I2C write error\n"); - - return ret; + struct i2c_client *client = to_i2c_client(chip->dev); + int error; + + chip->xfer_buf[0] = cpu_to_be16(reg); + chip->xfer_buf[1] = cpu_to_be16(data); + + error = i2c_master_send(client, (u8 *)chip->xfer_buf, + 2 * sizeof(*chip->xfer_buf)); + if (unlikely(error < 0)) { + dev_err(&client->dev, "I2C write error: %d\n", error); + return error; + } + + return 0; } -static int ad714x_i2c_read(struct device *dev, unsigned short reg, - unsigned short *data) +static int ad714x_i2c_read(struct ad714x_chip *chip, + unsigned short reg, unsigned short *data) { - struct i2c_client *client = to_i2c_client(dev); - int ret = 0; - unsigned short tx = cpu_to_be16(reg); + struct i2c_client *client = to_i2c_client(chip->dev); + int error; + + chip->xfer_buf[0] = cpu_to_be16(reg); - ret = i2c_master_send(client, (u8 *)&tx, 2); - if (ret >= 0) - ret = i2c_master_recv(client, (u8 *)data, 2); + error = i2c_master_send(client, (u8 *)chip->xfer_buf, + sizeof(*chip->xfer_buf)); + if (error >= 0) + error = i2c_master_recv(client, (u8 *)chip->xfer_buf, + sizeof(*chip->xfer_buf)); - if (unlikely(ret < 0)) - dev_err(&client->dev, "I2C read error\n"); - else - *data = be16_to_cpu(*data); + if (unlikely(error < 0)) { + dev_err(&client->dev, "I2C read error: %d\n", error); + return error; + } - return ret; + *data = be16_to_cpup(chip->xfer_buf); + return 0; } static int __devinit ad714x_i2c_probe(struct i2c_client *client, diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 0c7f948..306577d 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c @@ -30,31 +30,54 @@ static int ad714x_spi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); -static int ad714x_spi_read(struct device *dev, +static int ad714x_spi_read(struct ad714x_chip *chip, unsigned short reg, unsigned short *data) { - struct spi_device *spi = to_spi_device(dev); - unsigned short tx = cpu_to_be16(AD714x_SPI_CMD_PREFIX | + struct spi_device *spi = to_spi_device(chip->dev); + struct spi_message message; + struct spi_transfer xfer[2]; + int error; + + spi_message_init(&message); + memset(xfer, 0, sizeof(xfer)); + + chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | AD714x_SPI_READ | reg); - int ret; + xfer[0].tx_buf = &chip->xfer_buf[0]; + xfer[0].len = sizeof(chip->xfer_buf[0]); + spi_message_add_tail(&xfer[0], &message); - ret = spi_write_then_read(spi, &tx, 2, data, 2); + xfer[1].rx_buf = &chip->xfer_buf[1]; + xfer[1].len = sizeof(chip->xfer_buf[1]); + spi_message_add_tail(&xfer[1], &message); - *data = be16_to_cpup(data); + error = spi_sync(spi, &message); + if (unlikely(error)) { + dev_err(chip->dev, "SPI 
read error: %d\n", error); + return error; + } - return ret; + *data = be16_to_cpu(chip->xfer_buf[1]); + return 0; } -static int ad714x_spi_write(struct device *dev, +static int ad714x_spi_write(struct ad714x_chip *chip, unsigned short reg, unsigned short data) { - struct spi_device *spi = to_spi_device(dev); - unsigned short tx[2] = { - cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg), - cpu_to_be16(data) - }; + struct spi_device *spi = to_spi_device(chip->dev); + int error; - return spi_write(spi, (u8 *)tx, 4); + chip->xfer_buf[0] = cpu_to_be16(AD714x_SPI_CMD_PREFIX | reg); + chip->xfer_buf[1] = cpu_to_be16(data); + + error = spi_write(spi, (u8 *)chip->xfer_buf, + 2 * sizeof(*chip->xfer_buf)); + if (unlikely(error)) { + dev_err(chip->dev, "SPI write error: %d\n", error); + return error; + } + + return 0; } static int __devinit ad714x_spi_probe(struct spi_device *spi) diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c index c3a62c4..2be0366c 100644 --- a/drivers/input/misc/ad714x.c +++ b/drivers/input/misc/ad714x.c @@ -59,7 +59,6 @@ #define STAGE11_AMBIENT 0x27D #define PER_STAGE_REG_NUM 36 -#define STAGE_NUM 12 #define STAGE_CFGREG_NUM 8 #define SYS_CFGREG_NUM 8 @@ -124,28 +123,6 @@ struct ad714x_driver_data { * information to integrate all things which will be private data * of spi/i2c device */ -struct ad714x_chip { - unsigned short h_state; - unsigned short l_state; - unsigned short c_state; - unsigned short adc_reg[STAGE_NUM]; - unsigned short amb_reg[STAGE_NUM]; - unsigned short sensor_val[STAGE_NUM]; - - struct ad714x_platform_data *hw; - struct ad714x_driver_data *sw; - - int irq; - struct device *dev; - ad714x_read_t read; - ad714x_write_t write; - - struct mutex mutex; - - unsigned product; - unsigned version; -}; - static void ad714x_use_com_int(struct ad714x_chip *ad714x, int start_stage, int end_stage) { @@ -154,13 +131,13 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x, mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); - ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); + ad714x->read(ad714x, STG_COM_INT_EN_REG, &data); data |= 1 << end_stage; - ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); + ad714x->write(ad714x, STG_COM_INT_EN_REG, data); - ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); + ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data); data &= ~mask; - ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); + ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); } static void ad714x_use_thr_int(struct ad714x_chip *ad714x, @@ -171,13 +148,13 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x, mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); - ad714x->read(ad714x->dev, STG_COM_INT_EN_REG, &data); + ad714x->read(ad714x, STG_COM_INT_EN_REG, &data); data &= ~(1 << end_stage); - ad714x->write(ad714x->dev, STG_COM_INT_EN_REG, data); + ad714x->write(ad714x, STG_COM_INT_EN_REG, data); - ad714x->read(ad714x->dev, STG_HIGH_INT_EN_REG, &data); + ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data); data |= mask; - ad714x->write(ad714x->dev, STG_HIGH_INT_EN_REG, data); + ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); } static int ad714x_cal_highest_stage(struct ad714x_chip *ad714x, @@ -274,10 +251,8 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx) int i; for (i = hw->start_stage; i <= hw->end_stage; i++) { - ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, - &ad714x->adc_reg[i]); - ad714x->read(ad714x->dev, - STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, + ad714x->read(ad714x, 
CDC_RESULT_S0 + i, &ad714x->adc_reg[i]); + ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, &ad714x->amb_reg[i]); ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] - @@ -445,10 +420,8 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx) int i; for (i = hw->start_stage; i <= hw->end_stage; i++) { - ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, - &ad714x->adc_reg[i]); - ad714x->read(ad714x->dev, - STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, + ad714x->read(ad714x, CDC_RESULT_S0 + i, &ad714x->adc_reg[i]); + ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, &ad714x->amb_reg[i]); if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) ad714x->sensor_val[i] = ad714x->adc_reg[i] - @@ -598,10 +571,8 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx) int i; for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { - ad714x->read(ad714x->dev, CDC_RESULT_S0 + i, - &ad714x->adc_reg[i]); - ad714x->read(ad714x->dev, - STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, + ad714x->read(ad714x, CDC_RESULT_S0 + i, &ad714x->adc_reg[i]); + ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, &ad714x->amb_reg[i]); if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) ad714x->sensor_val[i] = ad714x->adc_reg[i] - @@ -891,7 +862,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x) { unsigned short data; - ad714x->read(ad714x->dev, AD714X_PARTID_REG, &data); + ad714x->read(ad714x, AD714X_PARTID_REG, &data); switch (data & 0xFFF0) { case AD7142_PARTID: ad714x->product = 0x7142; @@ -940,23 +911,22 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x) for (i = 0; i < STAGE_NUM; i++) { reg_base = AD714X_STAGECFG_REG + i * STAGE_CFGREG_NUM; for (j = 0; j < STAGE_CFGREG_NUM; j++) - ad714x->write(ad714x->dev, reg_base + j, + ad714x->write(ad714x, reg_base + j, ad714x->hw->stage_cfg_reg[i][j]); } for (i = 0; i < SYS_CFGREG_NUM; i++) - ad714x->write(ad714x->dev, AD714X_SYSCFG_REG + i, + ad714x->write(ad714x, AD714X_SYSCFG_REG + i, ad714x->hw->sys_cfg_reg[i]); for (i = 0; i < SYS_CFGREG_NUM; i++) - ad714x->read(ad714x->dev, AD714X_SYSCFG_REG + i, - &data); + ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data); - ad714x->write(ad714x->dev, AD714X_STG_CAL_EN_REG, 0xFFF); + ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF); /* clear all interrupts */ - ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); - ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); - ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); + ad714x->read(ad714x, STG_LOW_INT_STA_REG, &data); + ad714x->read(ad714x, STG_HIGH_INT_STA_REG, &data); + ad714x->read(ad714x, STG_COM_INT_STA_REG, &data); } static irqreturn_t ad714x_interrupt_thread(int irq, void *data) @@ -966,9 +936,9 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data) mutex_lock(&ad714x->mutex); - ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &ad714x->l_state); - ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &ad714x->h_state); - ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &ad714x->c_state); + ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state); + ad714x->read(ad714x, STG_HIGH_INT_STA_REG, &ad714x->h_state); + ad714x->read(ad714x, STG_COM_INT_STA_REG, &ad714x->c_state); for (i = 0; i < ad714x->hw->button_num; i++) ad714x_button_state_machine(ad714x, i); @@ -1245,7 +1215,7 @@ int ad714x_disable(struct ad714x_chip *ad714x) mutex_lock(&ad714x->mutex); data = ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL] | 0x3; - ad714x->write(ad714x->dev, AD714X_PWR_CTRL, data); + ad714x->write(ad714x, AD714X_PWR_CTRL, data); 
mutex_unlock(&ad714x->mutex); @@ -1263,16 +1233,16 @@ int ad714x_enable(struct ad714x_chip *ad714x) /* resume to non-shutdown mode */ - ad714x->write(ad714x->dev, AD714X_PWR_CTRL, + ad714x->write(ad714x, AD714X_PWR_CTRL, ad714x->hw->sys_cfg_reg[AD714X_PWR_CTRL]); /* make sure the interrupt output line is not low level after resume, * otherwise we will get no chance to enter falling-edge irq again */ - ad714x->read(ad714x->dev, STG_LOW_INT_STA_REG, &data); - ad714x->read(ad714x->dev, STG_HIGH_INT_STA_REG, &data); - ad714x->read(ad714x->dev, STG_COM_INT_STA_REG, &data); + ad714x->read(ad714x, STG_LOW_INT_STA_REG, &data); + ad714x->read(ad714x, STG_HIGH_INT_STA_REG, &data); + ad714x->read(ad714x, STG_COM_INT_STA_REG, &data); mutex_unlock(&ad714x->mutex); diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h index 45c54fb..d12d149 100644 --- a/drivers/input/misc/ad714x.h +++ b/drivers/input/misc/ad714x.h @@ -11,11 +11,40 @@ #include +#define STAGE_NUM 12 + struct device; +struct ad714x_platform_data; +struct ad714x_driver_data; struct ad714x_chip; -typedef int (*ad714x_read_t)(struct device *, unsigned short, unsigned short *); -typedef int (*ad714x_write_t)(struct device *, unsigned short, unsigned short); +typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *); +typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short); + +struct ad714x_chip { + unsigned short h_state; + unsigned short l_state; + unsigned short c_state; + unsigned short adc_reg[STAGE_NUM]; + unsigned short amb_reg[STAGE_NUM]; + unsigned short sensor_val[STAGE_NUM]; + + struct ad714x_platform_data *hw; + struct ad714x_driver_data *sw; + + int irq; + struct device *dev; + ad714x_read_t read; + ad714x_write_t write; + + struct mutex mutex; + + unsigned product; + unsigned version; + + __be16 xfer_buf[16] ____cacheline_aligned; + +}; int ad714x_disable(struct ad714x_chip *ad714x); int ad714x_enable(struct ad714x_chip *ad714x); -- cgit v0.10.2 From 9eff794b777ac9ca034129a1b637204000c8fb29 Mon Sep 17 00:00:00 2001 From: Michael Hennerich Date: Mon, 22 Aug 2011 09:45:42 -0700 Subject: Input: ad714x - read the interrupt status registers in a row The interrupt status registers should be read in row to avoid invalid data. Alter "read" method for both bus options to allow reading several registers in a row and make sure we read interrupt status registers properly. Read sequence saves 50% of bus transactions compared to single register reads. So use it also for the result registers, which are also located in a row. Also update copyright notice. Signed-off-by: Michael Hennerich Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/ad714x-i2c.c b/drivers/input/misc/ad714x-i2c.c index 6c61218..025417d 100644 --- a/drivers/input/misc/ad714x-i2c.c +++ b/drivers/input/misc/ad714x-i2c.c @@ -1,7 +1,7 @@ /* * AD714X CapTouch Programmable Controller driver (I2C bus) * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. 
*/ @@ -47,9 +47,10 @@ static int ad714x_i2c_write(struct ad714x_chip *chip, } static int ad714x_i2c_read(struct ad714x_chip *chip, - unsigned short reg, unsigned short *data) + unsigned short reg, unsigned short *data, size_t len) { struct i2c_client *client = to_i2c_client(chip->dev); + int i; int error; chip->xfer_buf[0] = cpu_to_be16(reg); @@ -58,14 +59,16 @@ static int ad714x_i2c_read(struct ad714x_chip *chip, sizeof(*chip->xfer_buf)); if (error >= 0) error = i2c_master_recv(client, (u8 *)chip->xfer_buf, - sizeof(*chip->xfer_buf)); + len * sizeof(*chip->xfer_buf)); if (unlikely(error < 0)) { dev_err(&client->dev, "I2C read error: %d\n", error); return error; } - *data = be16_to_cpup(chip->xfer_buf); + for (i = 0; i < len; i++) + data[i] = be16_to_cpu(chip->xfer_buf[i]); + return 0; } diff --git a/drivers/input/misc/ad714x-spi.c b/drivers/input/misc/ad714x-spi.c index 306577d..875b508 100644 --- a/drivers/input/misc/ad714x-spi.c +++ b/drivers/input/misc/ad714x-spi.c @@ -1,7 +1,7 @@ /* * AD714X CapTouch Programmable Controller driver (SPI bus) * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ @@ -31,11 +31,12 @@ static int ad714x_spi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ad714x_spi_pm, ad714x_spi_suspend, ad714x_spi_resume); static int ad714x_spi_read(struct ad714x_chip *chip, - unsigned short reg, unsigned short *data) + unsigned short reg, unsigned short *data, size_t len) { struct spi_device *spi = to_spi_device(chip->dev); struct spi_message message; struct spi_transfer xfer[2]; + int i; int error; spi_message_init(&message); @@ -48,7 +49,7 @@ static int ad714x_spi_read(struct ad714x_chip *chip, spi_message_add_tail(&xfer[0], &message); xfer[1].rx_buf = &chip->xfer_buf[1]; - xfer[1].len = sizeof(chip->xfer_buf[1]); + xfer[1].len = sizeof(chip->xfer_buf[1]) * len; spi_message_add_tail(&xfer[1], &message); error = spi_sync(spi, &message); @@ -57,7 +58,9 @@ static int ad714x_spi_read(struct ad714x_chip *chip, return error; } - *data = be16_to_cpu(chip->xfer_buf[1]); + for (i = 0; i < len; i++) + data[i] = be16_to_cpu(chip->xfer_buf[i + 1]); + return 0; } diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c index 2be0366c..ca42c7d 100644 --- a/drivers/input/misc/ad714x.c +++ b/drivers/input/misc/ad714x.c @@ -1,7 +1,7 @@ /* * AD714X CapTouch Programmable Controller driver supporting AD7142/3/7/8/7A * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. 
*/ @@ -123,6 +123,7 @@ struct ad714x_driver_data { * information to integrate all things which will be private data * of spi/i2c device */ + static void ad714x_use_com_int(struct ad714x_chip *ad714x, int start_stage, int end_stage) { @@ -131,11 +132,11 @@ static void ad714x_use_com_int(struct ad714x_chip *ad714x, mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); - ad714x->read(ad714x, STG_COM_INT_EN_REG, &data); + ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1); data |= 1 << end_stage; ad714x->write(ad714x, STG_COM_INT_EN_REG, data); - ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data); + ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1); data &= ~mask; ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); } @@ -148,11 +149,11 @@ static void ad714x_use_thr_int(struct ad714x_chip *ad714x, mask = ((1 << (end_stage + 1)) - 1) - ((1 << start_stage) - 1); - ad714x->read(ad714x, STG_COM_INT_EN_REG, &data); + ad714x->read(ad714x, STG_COM_INT_EN_REG, &data, 1); data &= ~(1 << end_stage); ad714x->write(ad714x, STG_COM_INT_EN_REG, data); - ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data); + ad714x->read(ad714x, STG_HIGH_INT_EN_REG, &data, 1); data |= mask; ad714x->write(ad714x, STG_HIGH_INT_EN_REG, data); } @@ -250,13 +251,16 @@ static void ad714x_slider_cal_sensor_val(struct ad714x_chip *ad714x, int idx) struct ad714x_slider_plat *hw = &ad714x->hw->slider[idx]; int i; + ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, + &ad714x->adc_reg[hw->start_stage], + hw->end_stage - hw->start_stage + 1); + for (i = hw->start_stage; i <= hw->end_stage; i++) { - ad714x->read(ad714x, CDC_RESULT_S0 + i, &ad714x->adc_reg[i]); ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, - &ad714x->amb_reg[i]); + &ad714x->amb_reg[i], 1); - ad714x->sensor_val[i] = abs(ad714x->adc_reg[i] - - ad714x->amb_reg[i]); + ad714x->sensor_val[i] = + abs(ad714x->adc_reg[i] - ad714x->amb_reg[i]); } } @@ -419,13 +423,16 @@ static void ad714x_wheel_cal_sensor_val(struct ad714x_chip *ad714x, int idx) struct ad714x_wheel_plat *hw = &ad714x->hw->wheel[idx]; int i; + ad714x->read(ad714x, CDC_RESULT_S0 + hw->start_stage, + &ad714x->adc_reg[hw->start_stage], + hw->end_stage - hw->start_stage + 1); + for (i = hw->start_stage; i <= hw->end_stage; i++) { - ad714x->read(ad714x, CDC_RESULT_S0 + i, &ad714x->adc_reg[i]); ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, - &ad714x->amb_reg[i]); + &ad714x->amb_reg[i], 1); if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) - ad714x->sensor_val[i] = ad714x->adc_reg[i] - - ad714x->amb_reg[i]; + ad714x->sensor_val[i] = + ad714x->adc_reg[i] - ad714x->amb_reg[i]; else ad714x->sensor_val[i] = 0; } @@ -570,13 +577,16 @@ static void touchpad_cal_sensor_val(struct ad714x_chip *ad714x, int idx) struct ad714x_touchpad_plat *hw = &ad714x->hw->touchpad[idx]; int i; + ad714x->read(ad714x, CDC_RESULT_S0 + hw->x_start_stage, + &ad714x->adc_reg[hw->x_start_stage], + hw->x_end_stage - hw->x_start_stage + 1); + for (i = hw->x_start_stage; i <= hw->x_end_stage; i++) { - ad714x->read(ad714x, CDC_RESULT_S0 + i, &ad714x->adc_reg[i]); ad714x->read(ad714x, STAGE0_AMBIENT + i * PER_STAGE_REG_NUM, - &ad714x->amb_reg[i]); + &ad714x->amb_reg[i], 1); if (ad714x->adc_reg[i] > ad714x->amb_reg[i]) - ad714x->sensor_val[i] = ad714x->adc_reg[i] - - ad714x->amb_reg[i]; + ad714x->sensor_val[i] = + ad714x->adc_reg[i] - ad714x->amb_reg[i]; else ad714x->sensor_val[i] = 0; } @@ -862,7 +872,7 @@ static int ad714x_hw_detect(struct ad714x_chip *ad714x) { unsigned short data; - ad714x->read(ad714x, AD714X_PARTID_REG, 
&data); + ad714x->read(ad714x, AD714X_PARTID_REG, &data, 1); switch (data & 0xFFF0) { case AD7142_PARTID: ad714x->product = 0x7142; @@ -919,14 +929,12 @@ static void ad714x_hw_init(struct ad714x_chip *ad714x) ad714x->write(ad714x, AD714X_SYSCFG_REG + i, ad714x->hw->sys_cfg_reg[i]); for (i = 0; i < SYS_CFGREG_NUM; i++) - ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data); + ad714x->read(ad714x, AD714X_SYSCFG_REG + i, &data, 1); ad714x->write(ad714x, AD714X_STG_CAL_EN_REG, 0xFFF); /* clear all interrupts */ - ad714x->read(ad714x, STG_LOW_INT_STA_REG, &data); - ad714x->read(ad714x, STG_HIGH_INT_STA_REG, &data); - ad714x->read(ad714x, STG_COM_INT_STA_REG, &data); + ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); } static irqreturn_t ad714x_interrupt_thread(int irq, void *data) @@ -936,9 +944,7 @@ static irqreturn_t ad714x_interrupt_thread(int irq, void *data) mutex_lock(&ad714x->mutex); - ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state); - ad714x->read(ad714x, STG_HIGH_INT_STA_REG, &ad714x->h_state); - ad714x->read(ad714x, STG_COM_INT_STA_REG, &ad714x->c_state); + ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); for (i = 0; i < ad714x->hw->button_num; i++) ad714x_button_state_machine(ad714x, i); @@ -1225,8 +1231,6 @@ EXPORT_SYMBOL(ad714x_disable); int ad714x_enable(struct ad714x_chip *ad714x) { - unsigned short data; - dev_dbg(ad714x->dev, "%s enter\n", __func__); mutex_lock(&ad714x->mutex); @@ -1240,9 +1244,7 @@ int ad714x_enable(struct ad714x_chip *ad714x) * otherwise we will get no chance to enter falling-edge irq again */ - ad714x->read(ad714x, STG_LOW_INT_STA_REG, &data); - ad714x->read(ad714x, STG_HIGH_INT_STA_REG, &data); - ad714x->read(ad714x, STG_COM_INT_STA_REG, &data); + ad714x->read(ad714x, STG_LOW_INT_STA_REG, &ad714x->l_state, 3); mutex_unlock(&ad714x->mutex); diff --git a/drivers/input/misc/ad714x.h b/drivers/input/misc/ad714x.h index d12d149..3c85455 100644 --- a/drivers/input/misc/ad714x.h +++ b/drivers/input/misc/ad714x.h @@ -1,7 +1,7 @@ /* * AD714X CapTouch Programmable Controller driver (bus interfaces) * - * Copyright 2009 Analog Devices Inc. + * Copyright 2009-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ @@ -18,12 +18,12 @@ struct ad714x_platform_data; struct ad714x_driver_data; struct ad714x_chip; -typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *); +typedef int (*ad714x_read_t)(struct ad714x_chip *, unsigned short, unsigned short *, size_t); typedef int (*ad714x_write_t)(struct ad714x_chip *, unsigned short, unsigned short); struct ad714x_chip { - unsigned short h_state; unsigned short l_state; + unsigned short h_state; unsigned short c_state; unsigned short adc_reg[STAGE_NUM]; unsigned short amb_reg[STAGE_NUM]; -- cgit v0.10.2 From d5c073caf050bc713271a02e016b1672d9b7b935 Mon Sep 17 00:00:00 2001 From: Geoffrey Thomas Date: Mon, 22 Aug 2011 11:28:57 -0700 Subject: net: Documentation: RFC 2553bis is now RFC 3493 Signed-off-by: Geoffrey Thomas Signed-off-by: David S. 
Miller diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index db2a406..8154699 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -992,7 +992,7 @@ bindv6only - BOOLEAN TRUE: disable IPv4-mapped address feature FALSE: enable IPv4-mapped address feature - Default: FALSE (as specified in RFC2553bis) + Default: FALSE (as specified in RFC3493) IPv6 Fragmentation: -- cgit v0.10.2 From fcb8ce5cfe30ca9ca5c9a79cdfe26d1993e65e0c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 22 Aug 2011 11:42:53 -0700 Subject: Linux 3.1-rc3 diff --git a/Makefile b/Makefile index 3241d41..788511f 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 1 SUBLEVEL = 0 -EXTRAVERSION = -rc2 -NAME = Wet Seal +EXTRAVERSION = -rc3 +NAME = "Divemaster Edition" # *DOCUMENTATION* # To see a list of typical targets execute "make help" -- cgit v0.10.2 From 052605c6caa3e1edf8eee8fe5fe6d53f5721f39a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 26 Jul 2011 17:48:43 -0700 Subject: target: Make standard INQUIRY return 'not connected' for tpg_virt_lun0 This patch changes target_emulate_inquiry_std() to set the 'not connected' (0x3f) value in standard INQUIRY response data when we are processing a request to a virtual LUN=0 mapping from struct se_device *g_lun0_dev that has been set up for us in transport_lookup_cmd_lun(). This addresses an issue where qla2xxx FC clients need to be able to create demo-mode I_T FC Nexuses by default, but should not be exposing the default set of TPG LUNs to all FC clients. This includes adding a new optional target_core_fabric_ops->tpg_check_demo_mode_login_only() caller to allow demo_mode nexuses to skip the old default of building a demo-mode MappedLUNs list via core_tpg_add_node_to_devs(). 
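In outline (the real hunk appears further below), the optional callback is consulted before building demo-mode MappedLUNs, roughly:

    if (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only &&
        tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))
        ; /* login allowed, but no TPG LUNs exposed */
    else
        core_tpg_add_node_to_devs(acl, tpg);
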
(roland: Add missing tpg_check_demo_mode_login_only check in core_dev_add_lun) Reported-by: Roland Dreier Cc: Andrew Vasquez Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 8ae09a1..d095408 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -67,6 +67,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; struct se_device *dev = cmd->se_dev; + struct se_portal_group *tpg = lun->lun_sep->sep_tpg; unsigned char *buf; /* @@ -81,9 +82,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd) buf = transport_kmap_first_data_page(cmd); - buf[0] = dev->transport->get_device_type(dev); - if (buf[0] == TYPE_TAPE) - buf[1] = 0x80; + if (dev == tpg->tpg_virt_lun0.lun_se_dev) { + buf[0] = 0x3f; /* Not connected */ + } else { + buf[0] = dev->transport->get_device_type(dev); + if (buf[0] == TYPE_TAPE) + buf[1] = 0x80; + } buf[2] = dev->transport->get_device_rev(dev); /* diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b38b6c9..ec3fbcd 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1346,7 +1346,9 @@ struct se_lun *core_dev_add_lun( struct se_node_acl *acl; spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (acl->dynamic_node_acl) { + if (acl->dynamic_node_acl && + (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || + !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { spin_unlock_bh(&tpg->acl_node_lock); core_tpg_add_node_to_devs(acl, tpg); spin_lock_bh(&tpg->acl_node_lock); diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4f1ba4c..718ccd1 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -298,8 +298,16 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return NULL; } - - core_tpg_add_node_to_devs(acl, tpg); + /* + * Here we only create demo-mode MappedLUNs from the active + * TPG LUNs if the fabric is not explictly asking for + * tpg_check_demo_mode_login_only() == 1. 
+ */ + if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && + (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) + do { ; } while (0); + else + core_tpg_add_node_to_devs(acl, tpg); spin_lock_bh(&tpg->acl_node_lock); list_add_tail(&acl->acl_list, &tpg->acl_node_list); diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index 2de8fe9..126c675 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h @@ -27,6 +27,12 @@ struct target_core_fabric_ops { int (*tpg_check_demo_mode_cache)(struct se_portal_group *); int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *); int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *); + /* + * Optionally used by fabrics to allow demo-mode login, but not + * expose any TPG LUNs, and return 'not connected' in standard + * inquiry response + */ + int (*tpg_check_demo_mode_login_only)(struct se_portal_group *); struct se_node_acl *(*tpg_alloc_fabric_acl)( struct se_portal_group *); void (*tpg_release_fabric_acl)(struct se_portal_group *, -- cgit v0.10.2 From e1750ba20f0d850c38820190ccbf0f647723091a Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Mon, 1 Aug 2011 23:58:18 +0200 Subject: target: Use ERR_CAST inlined function Use ERR_CAST inlined function instead of ERR_PTR(PTR_ERR(...)) The semantic patch that makes this output is available in scripts/coccinelle/api/err_cast.cocci. More information about semantic patching is available at http://coccinelle.lip6.fr/ Signed-off-by: Thomas Meyer Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index f095e65..f1643db 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg( ISCSI_TCP); if (IS_ERR(tpg_np)) { iscsit_put_tpg(tpg); - return ERR_PTR(PTR_ERR(tpg_np)); + return ERR_CAST(tpg_np); } pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); @@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn( tiqn = iscsit_add_tiqn((unsigned char *)name); if (IS_ERR(tiqn)) - return ERR_PTR(PTR_ERR(tiqn)); + return ERR_CAST(tiqn); /* * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group. */ diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index f165469..55bbe08 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl( se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); if (IS_ERR(se_nacl)) - return ERR_PTR(PTR_ERR(se_nacl)); + return ERR_CAST(se_nacl); nacl_cg = &se_nacl->acl_group; nacl_cg->default_groups = se_nacl->acl_default_groups; -- cgit v0.10.2 From 9be08c5804ae4ad96ec22d0b1e71e630803a85ea Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Tue, 2 Aug 2011 10:26:36 +0200 Subject: iscsi-target: Fix leak on failure in iscsi_copy_param_list() We leak memory if the allocations for 'new_param->name' or 'new_param->value' fail in iscsi_target_parameters.c::iscsi_copy_param_list(). We also do a lot of variable assignments that are completely pointless if the allocations fail. So, let's move the allocations before the assignments and also make sure that we free whatever was allocated to one if the other allocation fails.
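A minimal sketch of the allocate-then-check idiom the fix below adopts, using illustrative names rather than the driver's own: kstrdup() replaces the open-coded kzalloc() + memcpy() + NUL termination, and because kfree(NULL) is a no-op the error path needs no special-casing for partial failure:

#include <linux/slab.h>
#include <linux/string.h>

struct kv_pair {
	char *name;
	char *value;
};

static struct kv_pair *kv_pair_dup(const char *name, const char *value)
{
	struct kv_pair *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	p->name = kstrdup(name, GFP_KERNEL);
	p->value = kstrdup(value, GFP_KERNEL);
	if (!p->name || !p->value) {
		kfree(p->name);		/* kfree(NULL) is a no-op */
		kfree(p->value);
		kfree(p);
		return NULL;
	}
	return p;
}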
There's also some small CodingStyle fixups in there (curly braces on both branches of if statement, only one variable per line) since I was in the area anyway. And finally, error messages in the function are put on a single line for easy grep'ability. Signed-off-by: Jesper Juhl Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 252e246..497b2e7 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -545,13 +545,13 @@ int iscsi_copy_param_list( struct iscsi_param_list *src_param_list, int leading) { - struct iscsi_param *new_param = NULL, *param = NULL; + struct iscsi_param *param = NULL; + struct iscsi_param *new_param = NULL; struct iscsi_param_list *param_list = NULL; param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL); if (!param_list) { - pr_err("Unable to allocate memory for" - " struct iscsi_param_list.\n"); + pr_err("Unable to allocate memory for struct iscsi_param_list.\n"); goto err_out; } INIT_LIST_HEAD(&param_list->param_list); @@ -567,8 +567,17 @@ int iscsi_copy_param_list( new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL); if (!new_param) { - pr_err("Unable to allocate memory for" - " struct iscsi_param.\n"); + pr_err("Unable to allocate memory for struct iscsi_param.\n"); + goto err_out; + } + + new_param->name = kstrdup(param->name, GFP_KERNEL); + new_param->value = kstrdup(param->value, GFP_KERNEL); + if (!new_param->value || !new_param->name) { + kfree(new_param->value); + kfree(new_param->name); + kfree(new_param); + pr_err("Unable to allocate memory for parameter name/value.\n"); goto err_out; } @@ -580,32 +589,12 @@ int iscsi_copy_param_list( new_param->use = param->use; new_param->type_range = param->type_range; - new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); - if (!new_param->name) { - pr_err("Unable to allocate memory for" - " parameter name.\n"); - goto err_out; - } - - new_param->value = kzalloc(strlen(param->value) + 1, - GFP_KERNEL); - if (!new_param->value) { - pr_err("Unable to allocate memory for" - " parameter value.\n"); - goto err_out; - } - - memcpy(new_param->name, param->name, strlen(param->name)); - new_param->name[strlen(param->name)] = '\0'; - memcpy(new_param->value, param->value, strlen(param->value)); - new_param->value[strlen(param->value)] = '\0'; - list_add_tail(&new_param->p_list, &param_list->param_list); } - if (!list_empty(&param_list->param_list)) + if (!list_empty(&param_list->param_list)) { *dst_param_list = param_list; - else { + } else { pr_err("No parameters allocated.\n"); goto err_out; } -- cgit v0.10.2 From 6fc6148865c9a17cee33f251723f6a056f022ecd Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 2 Aug 2011 12:35:02 +0200 Subject: target: Convert target_core_rd.c to use BUG_ON Use BUG_ON(x) rather than if(x) BUG(); The semantic patch that fixes this problem is as follows: (http://coccinelle.lip6.fr/) // @@ identifier x; @@ -if (x) BUG(); +BUG_ON(x); @@ identifier x; @@ -if (!x) BUG(); +BUG_ON(!x); // Signed-off-by: Julia Lawall Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 3dd81d2..e567e12 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req) length = req->rd_size; dst = sg_virt(&sg_d[i++]) + dst_offset; - if (!dst) - BUG(); + BUG_ON(!dst); src = sg_virt(&sg_s[j]) +
src_offset; - if (!src) - BUG(); + BUG_ON(!src); dst_offset = 0; src_offset = length; @@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req) length = req->rd_size; dst = sg_virt(&sg_d[i]) + dst_offset; - if (!dst) - BUG(); + BUG_ON(!dst); if (sg_d[i].length == length) { i++; @@ -425,8 +422,7 @@ dst_offset = length; src = sg_virt(&sg_s[j++]) + src_offset; - if (!src) - BUG(); + BUG_ON(!src); src_offset = 0; page_end = 1; @@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req) length = req->rd_size; src = sg_virt(&sg_s[i++]) + src_offset; - if (!src) - BUG(); + BUG_ON(!src); dst = sg_virt(&sg_d[j]) + dst_offset; - if (!dst) - BUG(); + BUG_ON(!dst); src_offset = 0; dst_offset = length; @@ -535,8 +529,7 @@ length = req->rd_size; src = sg_virt(&sg_s[i]) + src_offset; - if (!src) - BUG(); + BUG_ON(!src); if (sg_s[i].length == length) { i++; @@ -545,8 +538,7 @@ src_offset = length; dst = sg_virt(&sg_d[j++]) + dst_offset; - if (!dst) - BUG(); + BUG_ON(!dst); dst_offset = 0; page_end = 1; -- cgit v0.10.2 From c2337c709102b343bd917ae00c79b266fb15b871 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 8 Aug 2011 14:02:27 -0700 Subject: iscsi-target: remove duplicate return We returned on the line before already. Signed-off-by: Dan Carpenter Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c24fb10..6a4ea29 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -2243,7 +2243,6 @@ static int iscsit_handle_snack( case 0: return iscsit_handle_recovery_datain_or_r2t(conn, buf, hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); - return 0; case ISCSI_FLAG_SNACK_TYPE_STATUS: return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); -- cgit v0.10.2 From 387e96c05299ca7a0ade874f343f91f0b01086a0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 8 Aug 2011 14:06:44 -0700 Subject: iscsi-target: forever loop bug in iscsit_attach_ooo_cmdsn() This patch fixes a forever loop bug in iscsit_attach_ooo_cmdsn() while walking sess->sess_ooo_cmdsn_list when the received CmdSN is less than the tail of the list. Signed-off-by: Dan Carpenter Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 9806507..c4c68da 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn( */ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, ooo_list) { - while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) + if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) continue; list_add(&ooo_cmdsn->ooo_list, -- cgit v0.10.2 From 16ab8e60a0ebc22cfbe61d84e620457a15f3a0bc Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 8 Aug 2011 19:03:38 -0700 Subject: target: Fix write payload exception handling with ->new_cmd_map This patch fixes a bug for fabrics using tfo->new_cmd_map() that expect transport_generic_request_failure() to call transport_send_check_condition_and_sense() for both READ and WRITE, instead of only for READ exceptions. This was originally observed with a failed WRITE_SAME_16 w/ unmap=0 using tcm_loop.
Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index cc5a339..fd7d451 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2053,8 +2053,14 @@ static void transport_generic_request_failure( cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; } - - if (!sc) + /* + * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, + * make the call to transport_send_check_condition_and_sense() + * directly. Otherwise expect the fabric to make the call to + * transport_send_check_condition_and_sense() after handling + * possible unsoliticied write data payloads. + */ + if (!sc && !cmd->se_tfo->new_cmd_map) transport_new_cmd_failure(cmd); else { ret = transport_send_check_condition_and_sense(cmd, -- cgit v0.10.2 From 706d5860969b3b24d65d9a57bd3bb5e4a1419c08 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 28 Jul 2011 00:07:03 -0700 Subject: target: Add WRITE_SAME (10) parsing and refactor passthrough checks This patch adds initial WRITE_SAME (10) w/ UNMAP=1 support following updates in sbcr26 to allow UNMAP=1 for the non 16 + 32 byte CDB case. It also refactors current pSCSI passthrough checks into target_check_write_same_discard() ahead of UNMAP=0 w/ write payload support into target_core_iblock.c. This includes the support for handling WRITE_SAME in transport_emulate_control_cdb(), and converts target_emulate_write_same to accept num_blocks directly for WRITE_SAME, WRITE_SAME_16 and WRITE_SAME_32. Reported-by: Eric Seppanen Cc: Roland Dreier Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index d095408..4c1d3a9 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -1090,24 +1090,17 @@ err: * Note this is not used for TCM/pSCSI passthrough */ static int -target_emulate_write_same(struct se_task *task, int write_same32) +target_emulate_write_same(struct se_task *task, u32 num_blocks) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; sector_t range; sector_t lba = cmd->t_task_lba; - unsigned int num_blocks; int ret; /* - * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict - * range when non zero is supplied, otherwise calculate the remaining - * range based on ->get_blocks() - starting LBA. + * Use the explicit range when non zero is supplied, otherwise calculate + * the remaining range based on ->get_blocks() - starting LBA.
*/ - if (write_same32) - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); - else - num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); - if (num_blocks != 0) range = num_blocks; else @@ -1170,13 +1163,23 @@ transport_emulate_control_cdb(struct se_task *task) } ret = target_emulate_unmap(task); break; + case WRITE_SAME: + if (!dev->transport->do_discard) { + pr_err("WRITE_SAME emulation not supported" + " for: %s\n", dev->transport->name); + return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + } + ret = target_emulate_write_same(task, + get_unaligned_be16(&cmd->t_task_cdb[7])); + break; case WRITE_SAME_16: if (!dev->transport->do_discard) { pr_err("WRITE_SAME_16 emulation not supported" " for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } - ret = target_emulate_write_same(task, 0); + ret = target_emulate_write_same(task, + get_unaligned_be32(&cmd->t_task_cdb[10])); break; case VARIABLE_LENGTH_CMD: service_action = @@ -1189,7 +1192,8 @@ transport_emulate_control_cdb(struct se_task *task) dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } - ret = target_emulate_write_same(task, 1); + ret = target_emulate_write_same(task, + get_unaligned_be32(&cmd->t_task_cdb[28])); break; default: pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index fd7d451..eb8055a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2861,6 +2861,38 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) return sectors; } +static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) +{ + /* + * Determine if the received WRITE_SAME is used to for direct + * passthrough into Linux/SCSI with struct request via TCM/pSCSI + * or we are signaling the use of internal WRITE_SAME + UNMAP=1 + * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. + */ + int passthrough = (dev->transport->transport_type == + TRANSPORT_PLUGIN_PHBA_PDEV); + + if (!passthrough) { + if ((flags[0] & 0x04) || (flags[0] & 0x02)) { + pr_err("WRITE_SAME PBDATA and LBDATA" + " bits not supported for Block Discard" + " Emulation\n"); + return -ENOSYS; + } + /* + * Currently for the emulated case we only accept + * tpws with the UNMAP=1 bit set. + */ + if (!(flags[0] & 0x08)) { + pr_err("WRITE_SAME w/o UNMAP bit not" + " supported for Block Discard Emulation\n"); + return -ENOSYS; + } + } + + return 0; +} + /* transport_generic_cmd_sequencer(): * * Generic Command Sequencer that should work for most DAS transport @@ -3081,27 +3113,9 @@ static int transport_generic_cmd_sequencer( cmd->t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; - /* - * Skip the remaining assignments for TCM/PSCSI passthrough - */ - if (passthrough) - break; - - if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { - pr_err("WRITE_SAME PBDATA and LBDATA" - " bits not supported for Block Discard" - " Emulation\n"); + if (target_check_write_same_discard(&cdb[10], dev) < 0) goto out_invalid_cdb_field; - } - /* - * Currently for the emulated case we only accept - * tpws with the UNMAP=1 bit set. 
- if (!(cdb[10] & 0x08)) { - pr_err("WRITE_SAME w/o UNMAP bit not" - " supported for Block Discard Emulation\n"); - goto out_invalid_cdb_field; - } + break; default: pr_err("VARIABLE_LENGTH_CMD service action" @@ -3358,33 +3372,31 @@ static int transport_generic_cmd_sequencer( } cmd->t_task_lba = get_unaligned_be64(&cdb[2]); - passthrough = (dev->transport->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - /* - * Determine if the received WRITE_SAME_16 is used to for direct - * passthrough into Linux/SCSI with struct request via TCM/pSCSI - * or we are signaling the use of internal WRITE_SAME + UNMAP=1 - * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and - * TCM/FILEIO subsystem plugin backstores. - */ - if (!passthrough) { - if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { - pr_err("WRITE_SAME PBDATA and LBDATA" - " bits not supported for Block Discard" - " Emulation\n"); - goto out_invalid_cdb_field; - } - /* - * Currently for the emulated case we only accept - * tpws with the UNMAP=1 bit set. - */ - if (!(cdb[1] & 0x08)) { - pr_err("WRITE_SAME w/o UNMAP bit not " - " supported for Block Discard Emulation\n"); - goto out_invalid_cdb_field; - } + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + + if (target_check_write_same_discard(&cdb[1], dev) < 0) + goto out_invalid_cdb_field; + break; + case WRITE_SAME: + sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); + if (sector_ret) + goto out_unsupported_cdb; + + if (sectors) + size = transport_get_size(sectors, cdb, cmd); + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + goto out_invalid_cdb_field; } + + cmd->t_task_lba = get_unaligned_be32(&cdb[2]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + /* + * Follow sbcr26 with WRITE_SAME (10) and check for the existence + * of byte 1 bit 3 UNMAP instead of original reserved field + */ + if (target_check_write_same_discard(&cdb[1], dev) < 0) + goto out_invalid_cdb_field; break; case ALLOW_MEDIUM_REMOVAL: case GPCMD_CLOSE_TRACK: -- cgit v0.10.2 From 12850626e2717f866a94e6ced724e3efe5a0aab8 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Mon, 8 Aug 2011 19:08:23 -0700 Subject: target: Fix WRITE_SAME usage with transport_get_size For all flavours of WRITE_SAME, we only expect to handle a single block of data-out buffer payload, regardless of the number of logical blocks presented in the CDB. This patch changes all flavours of WRITE_SAME in transport_generic_cmd_sequencer() to pass '1' into transport_get_size() instead of the extracted 'sectors' to properly handle the default usage of sg_write_same without the --xferlen parameter.
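As a back-of-the-envelope check of the sizing rule above (assumed semantics, not code from the patch): the data-out payload for any WRITE_SAME flavour is exactly one logical block, so a 512-byte block device should expect 512 bytes from the initiator no matter how large the NUMBER OF LOGICAL BLOCKS field is:

#include <linux/types.h>

/* expected data-out length for WRITE_SAME: one block of payload */
static u32 write_same_data_out_len(u32 logical_block_size)
{
	return 1 * logical_block_size;	/* e.g. 512, never sectors * 512 */
}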
Reported-by: Eric Seppanen Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index eb8055a..d35c2cc 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3103,7 +3103,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; if (sectors) - size = transport_get_size(sectors, cdb, cmd); + size = transport_get_size(1, cdb, cmd); else { pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" " supported\n"); @@ -3365,7 +3365,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; if (sectors) - size = transport_get_size(sectors, cdb, cmd); + size = transport_get_size(1, cdb, cmd); else { pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); goto out_invalid_cdb_field; @@ -3383,7 +3383,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; if (sectors) - size = transport_get_size(sectors, cdb, cmd); + size = transport_get_size(1, cdb, cmd); else { pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); goto out_invalid_cdb_field; -- cgit v0.10.2 From 72f4ba1e32a1e5da31dcf14ea4b8985ae88a8bdb Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 9 Aug 2011 22:53:02 -0700 Subject: target: Remove duplicate task completions in transport_emulate_control_cdb This patch removes a duplicate set of transport_complete_task() calls in target_emulate_unmap() and target_emulate_write_same() as the completion call is already done within transport_emulate_control_cdb() This patch also adds a check in transport_emulate_control_cdb() for the existing SCF_EMULATE_CDB_ASYNC flag currently used by SYNCHRONIZE_CACHE in order to handle IMMEDIATE processing. Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 4c1d3a9..40ad142 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -1077,8 +1077,6 @@ target_emulate_unmap(struct se_task *task) size -= 16; } - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); err: transport_kunmap_first_data_page(cmd); @@ -1115,8 +1113,6 @@ target_emulate_write_same(struct se_task *task, u32 num_blocks) return ret; } - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); return 0; } @@ -1228,8 +1224,14 @@ transport_emulate_control_cdb(struct se_task *task) if (ret < 0) return ret; - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); + /* + * Handle the successful completion here unless a caller + * has explictly requested an asychronous completion. + */ + if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + } return PYX_TRANSPORT_SENT_TO_TRANSPORT; } -- cgit v0.10.2 From 7abbe7f3e4243e28a9169ee1b8d76f10a6f5d37c Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 10 Aug 2011 18:41:14 -0700 Subject: target: Fix SYNCHRONIZE_CACHE zero LBA + range breakage This patch fixes a SYNCHRONIZE_CACHE CDB handling bug with IBLOCK/FILEIO backends where transport_cmd_get_valid_sectors() was incorrectly rejecting a zero LBA + range CDB from being processed, and returning CHECK_CONDITION. This includes changing transport_cmd_get_valid_sectors() to return '0' on success and '-EINVAL' on failure (this makes more sense than sectors), and to only check transport_cmd_get_valid_sectors() when a non zero LBA + range SYNCHRONIZE_CACHE operation has been received for the non passthrough case.
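A condensed sketch of the convention this adopts (illustrative signature; the real check derives the device end from the backend's ->get_blocks()):

#include <linux/types.h>
#include <linux/errno.h>

/* 0 on success, -EINVAL if LBA + range runs past the end of the device */
static int lba_range_ok(u64 lba, u32 sectors, u64 dev_end_lba)
{
	if (lba + sectors > dev_end_lba)
		return -EINVAL;
	return 0;
}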
Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d35c2cc..d385c31 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2853,12 +2853,10 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) " transport_dev_end_lba(): %llu\n", cmd->t_task_lba, sectors, transport_dev_end_lba(dev)); - pr_err(" We should return CHECK_CONDITION" - " but we don't yet\n"); - return 0; + return -EINVAL; } - return sectors; + return 0; } static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) @@ -3350,10 +3348,12 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC; /* * Check to ensure that LBA + Range does not exceed past end of - * device. + * device for IBLOCK and FILEIO ->do_sync_cache() backend calls */ - if (!transport_cmd_get_valid_sectors(cmd)) - goto out_invalid_cdb_field; + if ((cmd->t_task_lba != 0) || (sectors != 0)) { + if (transport_cmd_get_valid_sectors(cmd) < 0) + goto out_invalid_cdb_field; + } break; case UNMAP: size = get_unaligned_be16(&cdb[7]); -- cgit v0.10.2 From 01cde4d54327884a0b61ce8666092f5703557d4b Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 10 Aug 2011 00:59:58 -0700 Subject: target: Add missing DATA_SG_IO transport_cmd_get_valid_sectors check This patch adds the missing transport_cmd_get_valid_sectors() check for SCF_SCSI_DATA_SG_IO_CDB type payloads to ensure that a received LBA + range does not extend past the end of the associated backend struct se_device. This patch also fixes a bug in the failure path of transport_new_cmd_obj() where this check can fail, so change to use a signed 'rc' and return '-EINVAL' to signal proper transport_generic_request_failure() handling.
Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d385c31..ab61c55 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3891,9 +3891,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); static int transport_new_cmd_obj(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - u32 task_cdbs; - u32 rc; - int set_counts = 1; + int set_counts = 1, rc, task_cdbs; /* * Setup any BIDI READ tasks and memory from @@ -3911,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return PYX_TRANSPORT_LU_COMM_FAILURE; + return -EINVAL; } atomic_inc(&cmd->t_fe_count); atomic_inc(&cmd->t_se_count); @@ -3930,7 +3928,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return PYX_TRANSPORT_LU_COMM_FAILURE; + return -EINVAL; } if (set_counts) { @@ -4248,10 +4246,13 @@ static u32 transport_allocate_tasks( struct scatterlist *sgl, unsigned int sgl_nents) { - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + if (transport_cmd_get_valid_sectors(cmd) < 0) + return -EINVAL; + return transport_allocate_data_tasks(cmd, lba, data_direction, sgl, sgl_nents); - else + } else return transport_allocate_control_task(cmd); } -- cgit v0.10.2 From 525a48a21da259d00d6ebc5b60563b5bcf022c26 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 13 Aug 2011 02:11:38 -0700 Subject: target: Fix task count > 1 handling breakage and use max_sector page alignment This patch addresses recent breakage with multiple se_task (task_count > 1) operation following backend dev->se_sub_dev->se_dev_attrib.max_sectors in new transport_allocate_data_tasks() code. The initial bug here was a bogus task->task_sg_nents assignment in transport_allocate_data_tasks() based on the passed parameter, which now uses DIV_ROUND_UP(task_size, PAGE_SIZE) to determine the proper number of per task SGL entries for the (task_count > 1) case. This also means we now need to enforce a PAGE_SIZE aligned max_sector count value for this to work as expected without bringing back the pre v3.1 transport_map_mem_to_sg() logic to handle SGL offsets across multiple tasks. So this patch adds se_dev_align_max_sectors() to round down max_sectors as necessary to ensure this alignment via se_dev_set_default_attribs() and se_dev_set_max_sectors(), and keeps it simple for (task_count > 1) operation. So far this bugfix has been tested with (task_count > 1) operation using iscsi-target and iblock backends. Reported-by: Chris Boot Cc: Kiran Patil Cc: Andy Grover Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ec3fbcd..4b5237f 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev) return ret; } +u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) +{ + u32 tmp, aligned_max_sectors; + /* + * Limit max_sectors to a PAGE_SIZE aligned value for modern + * transport_allocate_data_tasks() operation.
+ */ + tmp = rounddown((max_sectors * block_size), PAGE_SIZE); + aligned_max_sectors = (tmp / block_size); + if (max_sectors != aligned_max_sectors) { + printk(KERN_INFO "Rounding down aligned max_sectors from %u" + " to %u\n", max_sectors, aligned_max_sectors); + return aligned_max_sectors; + } + + return max_sectors; +} + void se_dev_set_default_attribs( struct se_device *dev, struct se_dev_limits *dev_limits) @@ -878,6 +896,11 @@ void se_dev_set_default_attribs( * max_sectors is based on subsystem plugin dependent requirements. */ dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; + /* + * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() + */ + limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, + limits->logical_block_size); dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; /* * Set optimal_sectors from max_sectors, which can be lowered via @@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) return -EINVAL; } } + /* + * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() + */ + max_sectors = se_dev_align_max_sectors(max_sectors, + dev->se_sub_dev->se_dev_attrib.block_size); dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ab61c55..efac6a9 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4126,7 +4126,11 @@ static int transport_allocate_data_tasks( /* Update new cdb with updated lba/sectors */ cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); - + /* + * This now assumes that passed sg_ents are in PAGE_SIZE chunks + * in order to calculate the number per task SGL entries + */ + task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE); /* * Check if the fabric module driver is requesting that all * struct se_task->task_sg[] be chained together.. If so, @@ -4136,7 +4140,6 @@ static int transport_allocate_data_tasks( * It's so much easier and only a waste when task_count > 1. * That is extremely rare. */ - task->task_sg_nents = sgl_nents; if (cmd->se_tfo->task_sg_chaining) { task->task_sg_nents++; task->task_padded_sg = 1; -- cgit v0.10.2 From c3c74c7a33d837be391ab61aaae39bb21f16736a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 13 Aug 2011 05:30:31 -0700 Subject: target: Fix task SGL chaining breakage with transport_allocate_data_tasks This patch fixes two bugs associated with transport_do_task_sg_chain() operation where transport_allocate_data_tasks() was incorrectly setting task_padded_sg for all tasks, and causing bogus task->task_sg_nents assignments + OOPsen with fabrics depending upon this code. The first bit here adds a task_sg_nents_padded check in transport_allocate_data_tasks() to include an extra SGL vector when necessary for tasks that expect to be linked using sg_chain(). The second change involves making transport_do_task_sg_chain() properly account for the extra SGL vector when task->task_padded_sg is set for the non trailing ->task_sg or single ->task_sg allocations. Note this patch also removes the BUG_ON(!task->task_padded_sg) check within transport_do_task_sg_chain() as we expect this to happen normally with the updated logic in transport_allocate_data_tasks(), along with being bogus for CONTROL_SG_IO_CDB type payloads. 
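The resulting allocation rule, shown as a standalone sketch (the driver computes this inline rather than through a helper): every non-trailing task reserves one extra scatterlist slot to hold the link written by sg_chain(), while the last task of a list, or a single task, does not:

#include <linux/types.h>

static unsigned int task_sg_nents_padded(unsigned int nents, bool is_last_task)
{
	/* extra entry carries the sg_chain() link to the next task's SGL */
	return is_last_task ? nents : nents + 1;
}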
So far this bugfix has been tested with tcm_qla2xxx and iblock backends in (task_count > 1) and (task_count == 1) operation. Reported-by: Kiran Patil Cc: Kiran Patil Cc: Andy Grover Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index efac6a9..9cc49d1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4044,8 +4044,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) if (!task->task_sg) continue; - BUG_ON(!task->task_padded_sg); - if (!sg_first) { sg_first = task->task_sg; chained_nents = task->task_sg_nents; @@ -4053,9 +4051,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) sg_chain(sg_prev, sg_prev_nents, task->task_sg); chained_nents += task->task_sg_nents; } + /* + * For the padded tasks, use the extra SGL vector allocated + * in transport_allocate_data_tasks() for the sg_prev_nents + * offset into sg_chain() above.. The last task of a + * multi-task list, or a single task will not have + * task->task_sg_padded set.. + */ + if (task->task_padded_sg) + sg_prev_nents = (task->task_sg_nents + 1); + else + sg_prev_nents = task->task_sg_nents; sg_prev = task->task_sg; - sg_prev_nents = task->task_sg_nents; } /* * Setup the starting pointer and total t_tasks_sg_linked_no including @@ -4107,7 +4115,7 @@ static int transport_allocate_data_tasks( cmd_sg = sgl; for (i = 0; i < task_count; i++) { - unsigned int task_size; + unsigned int task_size, task_sg_nents_padded; int count; task = transport_generic_get_task(cmd, data_direction); @@ -4135,24 +4143,24 @@ static int transport_allocate_data_tasks( * Check if the fabric module driver is requesting that all * struct se_task->task_sg[] be chained together.. If so, * then allocate an extra padding SG entry for linking and - * marking the end of the chained SGL. - * Possibly over-allocate task sgl size by using cmd sgl size. - * It's so much easier and only a waste when task_count > 1. - * That is extremely rare. + * marking the end of the chained SGL for every task except + * the last one for (task_count > 1) operation, or skipping + * the extra padding for the (task_count == 1) case. */ - if (cmd->se_tfo->task_sg_chaining) { - task->task_sg_nents++; + if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { + task_sg_nents_padded = (task->task_sg_nents + 1); task->task_padded_sg = 1; - } + } else + task_sg_nents_padded = task->task_sg_nents; task->task_sg = kmalloc(sizeof(struct scatterlist) * - task->task_sg_nents, GFP_KERNEL); + task_sg_nents_padded, GFP_KERNEL); if (!task->task_sg) { cmd->se_dev->transport->free_task(task); return -ENOMEM; } - sg_init_table(task->task_sg, task->task_sg_nents); + sg_init_table(task->task_sg, task_sg_nents_padded); task_size = task->task_size; -- cgit v0.10.2 From 6626a0572657a0945a7b9ccf4a6d6ad1750f9adc Mon Sep 17 00:00:00 2001 From: Chris Boot Date: Sat, 13 Aug 2011 22:10:46 -0700 Subject: iscsi-target: Implement iSCSI target IPv6 address printing. The iSCSI target configfs code to print out an initiator's IPv6 address is not fully implemented. This patch uses snprintf() with the "%pI6c" format string to format the IPv6 address for display purposes.
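A self-contained usage sketch of the "%pI6c" extension (the kernel's vsnprintf formats a struct in6_addr in compressed RFC 5952 style; buffer size and function name here are illustrative):

#include <linux/in6.h>
#include <linux/kernel.h>

static void print_peer(const struct in6_addr *addr)
{
	char buf[48];	/* large enough for any textual IPv6 address */

	snprintf(buf, sizeof(buf), "%pI6c", addr);	/* e.g. "::1" */
	pr_debug("peer address: %s\n", buf);
}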
Signed-off-by: Chris Boot Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bcaf82f..daad362 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) ISCSI_LOGIN_STATUS_TARGET_ERROR); goto new_sess_out; } -#if 0 - if (!iscsi_ntop6((const unsigned char *) - &sock_in6.sin6_addr.in6_u, - (char *)&conn->ipv6_login_ip[0], - IPV6_ADDRESS_SPACE)) { - pr_err("iscsi_ntop6() failed\n"); - iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, - ISCSI_LOGIN_STATUS_TARGET_ERROR); - goto new_sess_out; - } -#else - pr_debug("Skipping iscsi_ntop6()\n"); -#endif + snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", + &sock_in6.sin6_addr.in6_u); + conn->login_port = ntohs(sock_in6.sin6_port); } else { memset(&sock_in, 0, sizeof(struct sockaddr_in)); -- cgit v0.10.2 From ba7736696341ad4253125055c0c85aa9f42959a0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 13 Aug 2011 22:35:00 -0700 Subject: iscsi-target: Fix iscsit_allocate_se_cmd_for_tmr failure path bugs This patch fixes two bugs in allocation failure handling in iscsit_allocate_se_cmd_for_tmr(): The first, reported by DanC, is a use-after-free call to transport_free_se_cmd(); this patch drops the transport_free_se_cmd() call altogether, as iscsit_release_cmd() will release existing allocations as expected. The second is a bug where iscsi_cmd_t was being leaked on a cmd->tmr_req allocation failure, so make this jump to iscsit_release_cmd() as well. Signed-off-by: Dan Carpenter Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a1acb01..a0d23bc 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( if (!cmd->tmr_req) { pr_err("Unable to allocate memory for" " Task Management command!\n"); - return NULL; + goto out; } /* * TASK_REASSIGN for ERL=2 / connection stays inside of @@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr( return cmd; out: iscsit_release_cmd(cmd); - if (se_cmd) - transport_free_se_cmd(se_cmd); return NULL; } -- cgit v0.10.2 From f15ea5780d08e4c96930c0d607d05e480ec588c8 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 12 Aug 2011 10:01:24 -0700 Subject: target: Print subpage too for unhandled MODE SENSE pages Make a log message more useful by printing both the page and subpage that an initiator is requesting.
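For reference, both fields come straight out of the MODE SENSE CDB as defined by SPC: byte 2 carries the page control in bits 7-6 and the page code in bits 5-0, and byte 3 is the subpage code. A sketch of the extraction (helper name is illustrative):

#include <linux/printk.h>

static void log_unhandled_mode_page(const unsigned char *cdb)
{
	/* byte 2: PC in bits 7-6, PAGE CODE in bits 5-0; byte 3: SUBPAGE */
	pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
	       cdb[2] & 0x3f, cdb[3]);
}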
Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 40ad142..89ae923 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -920,8 +920,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) length += target_modesense_control(dev, &buf[offset+length]); break; default: - pr_err("Got Unknown Mode Page: 0x%02x\n", - cdb[2] & 0x3f); + pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", + cdb[2] & 0x3f, cdb[3]); return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; } offset += length; -- cgit v0.10.2 From 4e0f05297ff615a9a4e269da301ff77f660a3ab0 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 12 Aug 2011 10:16:52 -0700 Subject: tcm_fc: init/exit functions should not be protected by "#ifdef MODULE" There's no need for the #ifdef protection when building into the kernel, and in fact we need the module_init() for the initialization function to be called. Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8781d1e..520a8ba 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -655,9 +655,7 @@ static void __exit ft_exit(void) synchronize_rcu(); } -#ifdef MODULE MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION); MODULE_LICENSE("GPL"); module_init(ft_init); module_exit(ft_exit); -#endif /* MODULE */ -- cgit v0.10.2 From e63a8e1933a2218cf801e46dd01bd8cca4a555ec Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 12 Aug 2011 16:01:02 -0700 Subject: target: Make locking in transport_deregister_session() IRQ safe At least the tcm_qla2xxx fabric driver calls into transport_deregister_session() while holding an IRQ-disabled spinlock, so the inner locking needs to use spin_lock_irqsave() instead of spin_lock_bh(). 
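The general pattern, as a sketch not taken from the patch: the _bh variants only mask softirqs and unconditionally re-enable them on unlock, which is invalid while hardirqs are disabled, so any path reachable with interrupts off must save and restore the interrupt state instead:

#include <linux/spinlock.h>

struct my_ctx {			/* illustrative */
	spinlock_t lock;
	unsigned long counter;
};

static void touch_shared_state(struct my_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);	/* safe in any context */
	ctx->counter++;
	spin_unlock_irqrestore(&ctx->lock, flags);
}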
This fixes warnings seen with tcm_qla2xxx like: WARNING: at kernel/softirq.c:159 local_bh_enable_ip+0x98/0xb0() Call Trace: [] warn_slowpath_common+0x7f/0xc0 [] warn_slowpath_null+0x1a/0x20 [] local_bh_enable_ip+0x98/0xb0 [] _raw_spin_unlock_bh+0x14/0x20 [] transport_deregister_session+0x96/0x180 [target_core_mod] [] tcm_qla2xxx_free_session+0xd1/0x170 [tcm_qla2xxx] [] qla_tgt_sess_put+0xc3/0x140 [qla2xxx] [] qla_tgt_stop_phase1+0x8f/0x2c0 [qla2xxx] [] tcm_qla2xxx_tpg_store_enable+0x6e/0xd0 [tcm_qla2xxx] [] target_fabric_tpg_attr_store+0x39/0x40 [target_core_mod] [] configfs_write_file+0xbd/0x120 [configfs] [] vfs_write+0xc6/0x180 [] sys_write+0x51/0x90 [] system_call_fastpath+0x16/0x1b Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9cc49d1..8d0c58e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess) { struct se_portal_group *se_tpg = se_sess->se_tpg; struct se_node_acl *se_nacl; + unsigned long flags; if (!se_tpg) { transport_free_session(se_sess); return; } - spin_lock_bh(&se_tpg->session_lock); + spin_lock_irqsave(&se_tpg->session_lock, flags); list_del(&se_sess->sess_list); se_sess->se_tpg = NULL; se_sess->fabric_sess_ptr = NULL; - spin_unlock_bh(&se_tpg->session_lock); + spin_unlock_irqrestore(&se_tpg->session_lock, flags); /* * Determine if we need to do extra work for this initiator node's @@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess) */ se_nacl = se_sess->se_node_acl; if (se_nacl) { - spin_lock_bh(&se_tpg->acl_node_lock); + spin_lock_irqsave(&se_tpg->acl_node_lock, flags); if (se_nacl->dynamic_node_acl) { if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( se_tpg)) { list_del(&se_nacl->acl_list); se_tpg->num_node_acls--; - spin_unlock_bh(&se_tpg->acl_node_lock); + spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); core_tpg_wait_for_nacl_pr_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); - spin_lock_bh(&se_tpg->acl_node_lock); + spin_lock_irqsave(&se_tpg->acl_node_lock, flags); } } - spin_unlock_bh(&se_tpg->acl_node_lock); + spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags); } transport_free_session(se_sess); -- cgit v0.10.2 From 28638887f351d11867562322b7abaa014dd5528a Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 16 Aug 2011 09:40:01 -0700 Subject: target: Convert acl_node_lock to be IRQ-disabling With qla2xxx, acl_node_lock is taken inside qla2xxx's hardware_lock, which is taken in hardirq context. 
This means acl_node_lock must become an IRQ-disabling lock; in particular this fixes lockdep warnings along the lines of ====================================================== [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ] (&(&se_tpg->acl_node_lock)->rlock){+.....}, at: [] transport_deregister_session+0x92/0x140 [target_core_mod] and this task is already holding: (&(&ha->hardware_lock)->rlock){-.-...}, at: [] qla_tgt_stop_phase1+0x57/0x2c0 [qla2xxx] which would create a new lock dependency: (&(&ha->hardware_lock)->rlock){-.-...} -> (&(&se_tpg->acl_node_lock)->rlock){+.....} but this new dependency connects a HARDIRQ-irq-safe lock: (&(&ha->hardware_lock)->rlock){-.-...} to a HARDIRQ-irq-unsafe lock: (&(&se_tpg->acl_node_lock)->rlock){+.....} Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 4b5237f..ca6e4a4 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) struct se_dev_entry *deve; u32 i; - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); spin_lock_irq(&nacl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { @@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) } spin_unlock_irq(&nacl->device_list_lock); - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); } - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); } static struct se_port *core_alloc_port(struct se_device *dev) @@ -1372,17 +1372,17 @@ struct se_lun *core_dev_add_lun( */ if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { struct se_node_acl *acl; - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { if (acl->dynamic_node_acl && (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); core_tpg_add_node_to_devs(acl, tpg); - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); } } - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); } return lun_p; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 1c1b849..7fd3a16 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port( * from the decoded fabric module specific TransportID * at *i_str. 
*/ - spin_lock_bh(&tmp_tpg->acl_node_lock); + spin_lock_irq(&tmp_tpg->acl_node_lock); dest_node_acl = __core_tpg_get_initiator_node_acl( tmp_tpg, i_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_inc(); } - spin_unlock_bh(&tmp_tpg->acl_node_lock); + spin_unlock_irq(&tmp_tpg->acl_node_lock); if (!dest_node_acl) { core_scsi3_tpg_undepend_item(tmp_tpg); @@ -3496,14 +3496,14 @@ after_iport_check: /* * Locate the destination struct se_node_acl from the received Transport ID */ - spin_lock_bh(&dest_se_tpg->acl_node_lock); + spin_lock_irq(&dest_se_tpg->acl_node_lock); dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, initiator_str); if (dest_node_acl) { atomic_inc(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_inc(); } - spin_unlock_bh(&dest_se_tpg->acl_node_lock); + spin_unlock_irq(&dest_se_tpg->acl_node_lock); if (!dest_node_acl) { pr_err("Unable to locate %s dest_node_acl for" diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 718ccd1..162b736 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( { struct se_node_acl *acl; - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { if (!strcmp(acl->initiatorname, initiatorname) && !acl->dynamic_node_acl) { - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return acl; } } - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return NULL; } @@ -309,10 +309,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( else core_tpg_add_node_to_devs(acl, tpg); - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); list_add_tail(&acl->acl_list, &tpg->acl_node_list); tpg->num_node_acls++; - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), @@ -362,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( { struct se_node_acl *acl = NULL; - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); if (acl) { if (acl->dynamic_node_acl) { @@ -370,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( pr_debug("%s_TPG[%u] - Replacing dynamic ACL" " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); /* * Release the locally allocated struct se_node_acl * because * core_tpg_add_initiator_node_acl() returned @@ -386,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( " Node %s already exists for TPG %u, ignoring" " request.\n", tpg->se_tpg_tfo->get_fabric_name(), initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return ERR_PTR(-EEXIST); } - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); if (!se_nacl) { pr_err("struct se_node_acl pointer is NULL\n"); @@ -426,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( return ERR_PTR(-EINVAL); } - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); list_add_tail(&acl->acl_list, &tpg->acl_node_list); tpg->num_node_acls++; - 
spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); done: pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" @@ -453,14 +453,14 @@ int core_tpg_del_initiator_node_acl( struct se_session *sess, *sess_tmp; int dynamic_acl = 0; - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; dynamic_acl = 1; } list_del(&acl->acl_list); tpg->num_node_acls--; - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); spin_lock_bh(&tpg->session_lock); list_for_each_entry_safe(sess, sess_tmp, @@ -511,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth( struct se_node_acl *acl; int dynamic_acl = 0; - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); if (!acl) { pr_err("Access Control List entry for %s Initiator" " Node %s does not exists for TPG %hu, ignoring" " request.\n", tpg->se_tpg_tfo->get_fabric_name(), initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return -ENODEV; } if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; dynamic_acl = 1; } - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); spin_lock_bh(&tpg->session_lock); list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { @@ -541,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth( tpg->se_tpg_tfo->get_fabric_name(), initiatorname); spin_unlock_bh(&tpg->session_lock); - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); if (dynamic_acl) acl->dynamic_node_acl = 1; - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return -EEXIST; } /* @@ -579,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth( if (init_sess) tpg->se_tpg_tfo->close_session(init_sess); - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); if (dynamic_acl) acl->dynamic_node_acl = 1; - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return -EINVAL; } spin_unlock_bh(&tpg->session_lock); @@ -598,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth( initiatorname, tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg)); - spin_lock_bh(&tpg->acl_node_lock); + spin_lock_irq(&tpg->acl_node_lock); if (dynamic_acl) acl->dynamic_node_acl = 1; - spin_unlock_bh(&tpg->acl_node_lock); + spin_unlock_irq(&tpg->acl_node_lock); return 0; } @@ -725,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) * not been released because of TFO->tpg_check_demo_mode_cache() == 1 * in transport_deregister_session(). 
*/ - spin_lock_bh(&se_tpg->acl_node_lock); + spin_lock_irq(&se_tpg->acl_node_lock); list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, acl_list) { list_del(&nacl->acl_list); se_tpg->num_node_acls--; - spin_unlock_bh(&se_tpg->acl_node_lock); + spin_unlock_irq(&se_tpg->acl_node_lock); core_tpg_wait_for_nacl_pr_ref(nacl); core_free_device_list_for_node(nacl, se_tpg); se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); - spin_lock_bh(&se_tpg->acl_node_lock); + spin_lock_irq(&se_tpg->acl_node_lock); } - spin_unlock_bh(&se_tpg->acl_node_lock); + spin_unlock_irq(&se_tpg->acl_node_lock); if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) core_tpg_release_virtual_lun0(se_tpg); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 520a8ba..b15879d 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) struct se_portal_group *se_tpg = &tpg->se_tpg; struct se_node_acl *se_acl; - spin_lock_bh(&se_tpg->acl_node_lock); + spin_lock_irq(&se_tpg->acl_node_lock); list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { acl = container_of(se_acl, struct ft_node_acl, se_node_acl); pr_debug("acl %p port_name %llx\n", @@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata) break; } } - spin_unlock_bh(&se_tpg->acl_node_lock); + spin_unlock_irq(&se_tpg->acl_node_lock); return found; } -- cgit v0.10.2 From 858a914324c7786f483661e3a89bc8fbe50f1b9d Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 16 Aug 2011 08:15:26 -0700 Subject: hwmon: (ntc_thermistor) Simplify if sequence Replace unnecessary if with else statement. This fixes the following (false) compile warning reported with some combinations of C compiler version and configuration. drivers/hwmon/ntc_thermistor.c: In function 'ntc_show_temp': drivers/hwmon/ntc_thermistor.c:225: warning: 'low' may be used uninitialized in this function drivers/hwmon/ntc_thermistor.c:225: note: 'low' was declared here drivers/hwmon/ntc_thermistor.c:225: warning: 'high' may be used uninitialized in this function drivers/hwmon/ntc_thermistor.c:225: note: 'high' was declared here drivers/hwmon/ntc_thermistor.c:294: warning: 'temp' may be used uninitialized in this function Signed-off-by: Guenter Roeck Acked-by: Jean Delvare diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index d7926f4..eab1161 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -211,8 +211,7 @@ static int lookup_comp(struct ntc_data *data, if (data->comp[mid].ohm <= ohm) { *i_low = mid; *i_high = mid - 1; - } - if (data->comp[mid].ohm > ohm) { + } else { *i_low = mid + 1; *i_high = mid; } -- cgit v0.10.2 From b6bede3b4cdfbd188557ab50fceec2e91d295edf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 14 Aug 2011 17:13:00 +0000 Subject: xfs: fix tracing builds inside the source tree The code really requires the current source directory to be in the header search path. We already do this if building with an object tree separate from the source, but it needs to be added manually if building inside the source. The cflags addition for it accidentally got removed when collapsing the xfs directory structure. 
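The dependency being satisfied, sketched from the usual tracepoint boilerplate (file names assumed from context): exactly one object in the directory defines the events, and define_trace.h re-includes the trace header by its bare name, which only resolves when the source directory is on the include path:

/* xfs_trace.c (sketch) */
#define CREATE_TRACE_POINTS
#include "xfs_trace.h"	/* located via the -I$(src) added above */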
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index ffce328..427a4e8 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -16,6 +16,8 @@ # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # +ccflags-y += -I$(src) # needed for trace events + ccflags-$(CONFIG_XFS_DEBUG) += -g obj-$(CONFIG_XFS_FS) += xfs.o -- cgit v0.10.2 From 11f3a6bdc2528d1ce2af50202dbf7138fdee1b34 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 22 Aug 2011 06:05:59 +0000 Subject: bridge: fix a possible net_device leak Jan Beulich reported a possible net_device leak in bridge code after commit bb900b27a2f4 (bridge: allow creating bridge devices with netlink) Reported-by: Jan Beulich Signed-off-by: Eric Dumazet Acked-by: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 2cdf007..e738154 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -231,6 +231,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, int br_add_bridge(struct net *net, const char *name) { struct net_device *dev; + int res; dev = alloc_netdev(sizeof(struct net_bridge), name, br_dev_setup); @@ -240,7 +241,10 @@ int br_add_bridge(struct net *net, const char *name) dev_net_set(dev, net); - return register_netdev(dev); + res = register_netdev(dev); + if (res) + free_netdev(dev); + return res; } int br_del_bridge(struct net *net, const char *name) -- cgit v0.10.2 From f5e4282586dc0c9dab8c7d32e6c43aa07f68586b Mon Sep 17 00:00:00 2001 From: Jeremiah Matthey Date: Tue, 23 Aug 2011 09:44:30 +0200 Subject: HID: usbhid: Add support for SiGma Micro chip Patch to add SiGma Micro-based keyboards (1c4f:0002) to hid-quirks. These keyboards don't seem to allow the records to be initialized, and hence a timeout occurs when the usbhid driver attempts to initialize them. The patch just adds the signature for these keyboards to the hid-quirks list with the setting HID_QUIRK_NO_INIT_REPORTS. This removes the 5-10 second wait for the timeout to occur. Signed-off-by: Jeremiah Matthey Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 61c8809..7d27d2b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -579,6 +579,9 @@ #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 +#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f +#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002 + #define USB_VENDOR_ID_SKYCABLE 0x1223 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 621959d..4bdb5d4 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -89,6 +89,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, { 0, 0 } }; -- cgit v0.10.2 From 7c4c3960dff109bc5db4c35da481c212dadb5eb5 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Mon, 22 Aug 2011 21:17:57 +0000 Subject: drm/ttm: fix ttm_bo_add_ttm(user) failure path ttm_tt_destroy kfrees passed object, so we need to nullify a reference to it.
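The underlying idiom, as a generic sketch rather than TTM code: once a destructor has freed an embedded object, clear the owning pointer so later teardown or error handling cannot free or dereference it a second time:

#include <linux/slab.h>

struct buf_owner {		/* illustrative */
	void *buf;
};

static void buf_owner_drop(struct buf_owner *o)
{
	kfree(o->buf);
	o->buf = NULL;	/* a stale pointer here could be freed again later */
}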
Signed-off-by: Marcin Slusarz Cc: stable@kernel.org Reviewed-by: Thomas Hellstrom Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 56619f6..384116a 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -353,8 +353,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) ret = ttm_tt_set_user(bo->ttm, current, bo->buffer_start, bo->num_pages); - if (unlikely(ret != 0)) + if (unlikely(ret != 0)) { ttm_tt_destroy(bo->ttm); + bo->ttm = NULL; + } break; default: printk(KERN_ERR TTM_PFX "Illegal buffer object type\n"); -- cgit v0.10.2 From eac2095398668f989a3dd8d00be1b87850d78c01 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 22 Aug 2011 03:15:04 +0000 Subject: drm/ttm: unbind ttm before destroying node in accel move cleanup Nouveau makes the assumption that if a TTM is bound there will be a mm_node around for it and the backwards ordering here resulted in a use-after-free on some eviction paths. Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 77dbf40..ae3c6f5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -635,13 +635,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (ret) return ret; - ttm_bo_free_old_node(bo); if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm != NULL)) { ttm_tt_unbind(bo->ttm); ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } + ttm_bo_free_old_node(bo); } else { /** * This should help pipeline ordinary buffer moves. -- cgit v0.10.2 From 8d3bb23609d4ae22803a15d232289fc09a7b61c4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 22 Aug 2011 03:15:05 +0000 Subject: drm/ttm: ensure ttm for new node is bound before calling move_notify() This was true for new TTM_PL_SYSTEM and new TTM_PL_TT cases, but wasn't the case on TTM_PL_SYSTEM<->TTM_PL_TT moves, which causes trouble on some paths as nouveau's move_notify() hook requires that the dma addresses be valid at this point. Signed-off-by: Ben Skeggs Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 384116a..a4d38d8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -392,10 +392,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, * Create and bind a ttm if required. */ - if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) { - ret = ttm_bo_add_ttm(bo, false); - if (ret) - goto out_err; + if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { + if (bo->ttm == NULL) { + ret = ttm_bo_add_ttm(bo, false); + if (ret) + goto out_err; + } ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); if (ret) -- cgit v0.10.2 From 3989ef6cfb80825af2f7933415797f052817ac3e Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 17 Aug 2011 11:43:20 +0200 Subject: HID: wiimote: Simplify synchronization The new locking scheme in HID core allows us to remove a bit of synchronization. Since the HID layer acts synchronously we simply register input core last and there are no synchronization issues anymore. Also register sysfs files after that to simplify the code. Signed-off-by: David Herrmann Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index a594383..8a68bf5 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c @@ -10,7 +10,6 @@ * any later version.
*/ -#include #include #include #include @@ -33,7 +32,6 @@ struct wiimote_state { }; struct wiimote_data { - atomic_t ready; struct hid_device *hdev; struct input_dev *input; @@ -200,9 +198,6 @@ static ssize_t wiifs_led_show_##num(struct device *dev, \ unsigned long flags; \ int state; \ \ - if (!atomic_read(&wdata->ready)) \ - return -EBUSY; \ - \ spin_lock_irqsave(&wdata->state.lock, flags); \ state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ spin_unlock_irqrestore(&wdata->state.lock, flags); \ @@ -217,9 +212,6 @@ static ssize_t wiifs_led_set_##num(struct device *dev, \ unsigned long flags; \ __u8 state; \ \ - if (!atomic_read(&wdata->ready)) \ - return -EBUSY; \ - \ spin_lock_irqsave(&wdata->state.lock, flags); \ \ state = wdata->state.flags; \ @@ -244,13 +236,6 @@ wiifs_led_show_set(4); static int wiimote_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) { - struct wiimote_data *wdata = input_get_drvdata(dev); - - if (!atomic_read(&wdata->ready)) - return -EBUSY; - /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ - smp_rmb(); - return 0; } @@ -300,11 +285,6 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report, int i; unsigned long flags; - if (!atomic_read(&wdata->ready)) - return -EBUSY; - /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */ - smp_rmb(); - if (size < 1) return -EINVAL; @@ -362,6 +342,15 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) static void wiimote_destroy(struct wiimote_data *wdata) { + device_remove_file(&wdata->hdev->dev, &dev_attr_led1); + device_remove_file(&wdata->hdev->dev, &dev_attr_led2); + device_remove_file(&wdata->hdev->dev, &dev_attr_led3); + device_remove_file(&wdata->hdev->dev, &dev_attr_led4); + + input_unregister_device(wdata->input); + cancel_work_sync(&wdata->worker); + hid_hw_stop(wdata->hdev); + kfree(wdata); } @@ -377,19 +366,6 @@ static int wiimote_hid_probe(struct hid_device *hdev, return -ENOMEM; } - ret = device_create_file(&hdev->dev, &dev_attr_led1); - if (ret) - goto err; - ret = device_create_file(&hdev->dev, &dev_attr_led2); - if (ret) - goto err; - ret = device_create_file(&hdev->dev, &dev_attr_led3); - if (ret) - goto err; - ret = device_create_file(&hdev->dev, &dev_attr_led4); - if (ret) - goto err; - ret = hid_parse(hdev); if (ret) { hid_err(hdev, "HID parse failed\n"); @@ -408,9 +384,19 @@ static int wiimote_hid_probe(struct hid_device *hdev, goto err_stop; } - /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */ - smp_wmb(); - atomic_set(&wdata->ready, 1); + ret = device_create_file(&hdev->dev, &dev_attr_led1); + if (ret) + goto err_free; + ret = device_create_file(&hdev->dev, &dev_attr_led2); + if (ret) + goto err_free; + ret = device_create_file(&hdev->dev, &dev_attr_led3); + if (ret) + goto err_free; + ret = device_create_file(&hdev->dev, &dev_attr_led4); + if (ret) + goto err_free; + hid_info(hdev, "New device registered\n"); /* by default set led1 after device initialization */ @@ -420,15 +406,15 @@ static int wiimote_hid_probe(struct hid_device *hdev, return 0; +err_free: + wiimote_destroy(wdata); + return ret; + err_stop: hid_hw_stop(hdev); err: input_free_device(wdata->input); - device_remove_file(&hdev->dev, &dev_attr_led1); - device_remove_file(&hdev->dev, &dev_attr_led2); - device_remove_file(&hdev->dev, &dev_attr_led3); - device_remove_file(&hdev->dev, &dev_attr_led4); - wiimote_destroy(wdata); + kfree(wdata); return ret; } @@ -437,16 +423,6 @@ static void 
wiimote_hid_remove(struct hid_device *hdev) struct wiimote_data *wdata = hid_get_drvdata(hdev); hid_info(hdev, "Device removed\n"); - - device_remove_file(&hdev->dev, &dev_attr_led1); - device_remove_file(&hdev->dev, &dev_attr_led2); - device_remove_file(&hdev->dev, &dev_attr_led3); - device_remove_file(&hdev->dev, &dev_attr_led4); - - hid_hw_stop(hdev); - input_unregister_device(wdata->input); - - cancel_work_sync(&wdata->worker); wiimote_destroy(wdata); } -- cgit v0.10.2 From 26af17484a737aaa991a7ce578cb15809a582fbc Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 17 Aug 2011 11:43:21 +0200 Subject: HID: wiimote: Correctly call HID open/close callbacks Even though the bluetooth hid backend does not react on open/close callbacks, we should call them to be consistent with other hid drivers. Also the new input open/close handlers will be used in future to prepare the wiimote device for IR/extension input. Signed-off-by: David Herrmann Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index 8a68bf5..d49f67c 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c @@ -239,6 +239,20 @@ static int wiimote_input_event(struct input_dev *dev, unsigned int type, return 0; } +static int wiimote_input_open(struct input_dev *dev) +{ + struct wiimote_data *wdata = input_get_drvdata(dev); + + return hid_hw_open(wdata->hdev); +} + +static void wiimote_input_close(struct input_dev *dev) +{ + struct wiimote_data *wdata = input_get_drvdata(dev); + + hid_hw_close(wdata->hdev); +} + static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) { input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT], @@ -321,6 +335,8 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) input_set_drvdata(wdata->input, wdata); wdata->input->event = wiimote_input_event; + wdata->input->open = wiimote_input_open; + wdata->input->close = wiimote_input_close; wdata->input->dev.parent = &wdata->hdev->dev; wdata->input->id.bustype = wdata->hdev->bus; wdata->input->id.vendor = wdata->hdev->vendor; -- cgit v0.10.2 From 23a5a4a39eddbe515a832767a371cc54e82cc25e Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 17 Aug 2011 11:43:22 +0200 Subject: HID: wiimote: Register led class devices This registers 4 led devices to allow controlling the wiimote leds via standard LED sysfs API. It removes the four sysfs attributes so we don't have two APIs for one device. Signed-off-by: David Herrmann Signed-off-by: Jiri Kosina diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 306b15f..1130a89 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -589,6 +589,7 @@ config HID_WACOM_POWER_SUPPLY config HID_WIIMOTE tristate "Nintendo Wii Remote support" depends on BT_HIDP + depends on LEDS_CLASS ---help--- Support for the Nintendo Wii Remote bluetooth device. 
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index d49f67c..29edd55 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include "hid-ids.h" @@ -34,6 +35,7 @@ struct wiimote_state { struct wiimote_data { struct hid_device *hdev; struct input_dev *input; + struct led_classdev *leds[4]; spinlock_t qlock; __u8 head; @@ -51,6 +53,9 @@ struct wiimote_data { #define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4) +/* return flag for led \num */ +#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1)) + enum wiiproto_reqs { WIIPROTO_REQ_LED = 0x11, WIIPROTO_REQ_DRM_K = 0x30, @@ -85,9 +90,6 @@ static __u16 wiiproto_keymap[] = { BTN_MODE, /* WIIPROTO_KEY_HOME */ }; -#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \ - dev)) - static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, size_t count) { @@ -190,48 +192,53 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds) wiimote_queue(wdata, cmd, sizeof(cmd)); } -#define wiifs_led_show_set(num) \ -static ssize_t wiifs_led_show_##num(struct device *dev, \ - struct device_attribute *attr, char *buf) \ -{ \ - struct wiimote_data *wdata = dev_to_wii(dev); \ - unsigned long flags; \ - int state; \ - \ - spin_lock_irqsave(&wdata->state.lock, flags); \ - state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \ - spin_unlock_irqrestore(&wdata->state.lock, flags); \ - \ - return sprintf(buf, "%d\n", state); \ -} \ -static ssize_t wiifs_led_set_##num(struct device *dev, \ - struct device_attribute *attr, const char *buf, size_t count) \ -{ \ - struct wiimote_data *wdata = dev_to_wii(dev); \ - int tmp = simple_strtoul(buf, NULL, 10); \ - unsigned long flags; \ - __u8 state; \ - \ - spin_lock_irqsave(&wdata->state.lock, flags); \ - \ - state = wdata->state.flags; \ - \ - if (tmp) \ - wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\ - else \ - wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\ - \ - spin_unlock_irqrestore(&wdata->state.lock, flags); \ - \ - return count; \ -} \ -static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \ - wiifs_led_set_##num) - -wiifs_led_show_set(1); -wiifs_led_show_set(2); -wiifs_led_show_set(3); -wiifs_led_show_set(4); +static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev) +{ + struct wiimote_data *wdata; + struct device *dev = led_dev->dev->parent; + int i; + unsigned long flags; + bool value = false; + + wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); + + for (i = 0; i < 4; ++i) { + if (wdata->leds[i] == led_dev) { + spin_lock_irqsave(&wdata->state.lock, flags); + value = wdata->state.flags & WIIPROTO_FLAG_LED(i + 1); + spin_unlock_irqrestore(&wdata->state.lock, flags); + break; + } + } + + return value ? 
LED_FULL : LED_OFF; +} + +static void wiimote_leds_set(struct led_classdev *led_dev, + enum led_brightness value) +{ + struct wiimote_data *wdata; + struct device *dev = led_dev->dev->parent; + int i; + unsigned long flags; + __u8 state, flag; + + wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev)); + + for (i = 0; i < 4; ++i) { + if (wdata->leds[i] == led_dev) { + flag = WIIPROTO_FLAG_LED(i + 1); + spin_lock_irqsave(&wdata->state.lock, flags); + state = wdata->state.flags; + if (value == LED_OFF) + wiiproto_req_leds(wdata, state & ~flag); + else + wiiproto_req_leds(wdata, state | flag); + spin_unlock_irqrestore(&wdata->state.lock, flags); + break; + } + } +} static int wiimote_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) @@ -315,6 +322,58 @@ static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report, return 0; } +static void wiimote_leds_destroy(struct wiimote_data *wdata) +{ + int i; + struct led_classdev *led; + + for (i = 0; i < 4; ++i) { + if (wdata->leds[i]) { + led = wdata->leds[i]; + wdata->leds[i] = NULL; + led_classdev_unregister(led); + kfree(led); + } + } +} + +static int wiimote_leds_create(struct wiimote_data *wdata) +{ + int i, ret; + struct device *dev = &wdata->hdev->dev; + size_t namesz = strlen(dev_name(dev)) + 9; + struct led_classdev *led; + char *name; + + for (i = 0; i < 4; ++i) { + led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL); + if (!led) { + ret = -ENOMEM; + goto err; + } + name = (void*)&led[1]; + snprintf(name, namesz, "%s:blue:p%d", dev_name(dev), i); + led->name = name; + led->brightness = 0; + led->max_brightness = 1; + led->brightness_get = wiimote_leds_get; + led->brightness_set = wiimote_leds_set; + + ret = led_classdev_register(dev, led); + if (ret) { + kfree(led); + goto err; + } + wdata->leds[i] = led; + } + + return 0; + +err: + wiimote_leds_destroy(wdata); + return ret; +} + static struct wiimote_data *wiimote_create(struct hid_device *hdev) { struct wiimote_data *wdata; @@ -358,10 +417,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev) static void wiimote_destroy(struct wiimote_data *wdata) { - device_remove_file(&wdata->hdev->dev, &dev_attr_led1); - device_remove_file(&wdata->hdev->dev, &dev_attr_led2); - device_remove_file(&wdata->hdev->dev, &dev_attr_led3); - device_remove_file(&wdata->hdev->dev, &dev_attr_led4); + wiimote_leds_destroy(wdata); input_unregister_device(wdata->input); cancel_work_sync(&wdata->worker); @@ -400,16 +456,7 @@ static int wiimote_hid_probe(struct hid_device *hdev, goto err_stop; } - ret = device_create_file(&hdev->dev, &dev_attr_led1); - if (ret) - goto err_free; - ret = device_create_file(&hdev->dev, &dev_attr_led2); - if (ret) - goto err_free; - ret = device_create_file(&hdev->dev, &dev_attr_led3); - if (ret) - goto err_free; - ret = device_create_file(&hdev->dev, &dev_attr_led4); + ret = wiimote_leds_create(wdata); if (ret) goto err_free; -- cgit v0.10.2 From 2cb5e4bc530471e9596cd32390bf70c8ada13d9a Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 17 Aug 2011 11:43:23 +0200 Subject: HID: wiimote: Add drm request The wiimote reports data in several data reporting modes (DRM). The DRM request makes the wiimote send data in the requested drm. The DRM mode can be set explicitly or can be chosen by the driver. To let the driver choose the DRM mode, pass WIIPROTO_REQ_NULL placeholder to it. This is not a valid request and is replaced with an appropriate DRM.
Currently, the driver always sets the basic DRM_K mode, but this will be extended when further peripherals like accelerometer and IR are supported. Signed-off-by: David Herrmann Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index 29edd55..84c9eb9 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c @@ -57,7 +57,9 @@ struct wiimote_data { #define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1)) enum wiiproto_reqs { + WIIPROTO_REQ_NULL = 0x0, WIIPROTO_REQ_LED = 0x11, + WIIPROTO_REQ_DRM = 0x12, WIIPROTO_REQ_DRM_K = 0x30, }; @@ -192,6 +194,30 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds) wiimote_queue(wdata, cmd, sizeof(cmd)); } +/* + * Check what peripherals of the wiimote are currently + * active and select a proper DRM that supports all of + * the requested data inputs. + */ +static __u8 select_drm(struct wiimote_data *wdata) +{ + return WIIPROTO_REQ_DRM_K; +} + +static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm) +{ + __u8 cmd[3]; + + if (drm == WIIPROTO_REQ_NULL) + drm = select_drm(wdata); + + cmd[0] = WIIPROTO_REQ_DRM; + cmd[1] = 0; + cmd[2] = drm; + + wiimote_queue(wdata, cmd, sizeof(cmd)); +} + static enum led_brightness wiimote_leds_get(struct led_classdev *led_dev) { struct wiimote_data *wdata; -- cgit v0.10.2 From c87019e41d61f3f972bd2f6a2380fc9896e4ab74 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Wed, 17 Aug 2011 11:43:24 +0200 Subject: HID: wiimote: Add status and return request handlers The wiimote resets the current drm when an extension is plugged in. Fortunately, it also sends a status report in this situation so we just reset the drm on every status report to keep the drm consistent. Also handle return reports from the wiimote which indicate success and failure of requests that we've sent. 
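For orientation, such a table is typically walked from the HID raw-event callback; a sketch of the assumed dispatch loop follows (illustrative only, with names matching the handlers[] table added in the diff below):

/* Illustrative sketch: match the report id and a minimum payload size,
 * then run the matching handler.
 */
for (i = 0; handlers[i].id; ++i) {
	if (handlers[i].id == raw_data[0] && size >= handlers[i].size) {
		handlers[i].func(wdata, &raw_data[1]);
		break;
	}
}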
Signed-off-by: David Herrmann Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c index 84c9eb9..85a02e5 100644 --- a/drivers/hid/hid-wiimote.c +++ b/drivers/hid/hid-wiimote.c @@ -60,6 +60,8 @@ enum wiiproto_reqs { WIIPROTO_REQ_NULL = 0x0, WIIPROTO_REQ_LED = 0x11, WIIPROTO_REQ_DRM = 0x12, + WIIPROTO_REQ_STATUS = 0x20, + WIIPROTO_REQ_RETURN = 0x22, WIIPROTO_REQ_DRM_K = 0x30, }; @@ -313,6 +315,26 @@ static void handler_keys(struct wiimote_data *wdata, const __u8 *payload) input_sync(wdata->input); } +static void handler_status(struct wiimote_data *wdata, const __u8 *payload) +{ + handler_keys(wdata, payload); + + /* on status reports the drm is reset so we need to resend the drm */ + wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL); +} + +static void handler_return(struct wiimote_data *wdata, const __u8 *payload) +{ + __u8 err = payload[3]; + __u8 cmd = payload[2]; + + handler_keys(wdata, payload); + + if (err) + hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err, + cmd); +} + struct wiiproto_handler { __u8 id; size_t size; @@ -320,6 +342,8 @@ struct wiiproto_handler { }; static struct wiiproto_handler handlers[] = { + { .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status }, + { .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return }, { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys }, { .id = 0 } }; -- cgit v0.10.2 From f2b60717e692550bf753a5d64a5b69ea430fc832 Mon Sep 17 00:00:00 2001 From: Thomas Reim Date: Wed, 17 Aug 2011 09:03:32 +0000 Subject: drm/radeon: Extended DDC Probing for Toshiba L300D Radeon Mobility X1100 HDMI-A Connector Toshiba Satellite L300D with ATI Mobility Radeon X1100 sends data to i2c bus for a HDMI connector that is not implemented/existent on the notebook's board. Fix by applying extended DDC probing for this connector. Requires [PATCH] drm/radeon: Extended DDC Probing for Connectors with Improperly Wired DDC Lines Tested for kernel 2.6.38 on Toshiba Satellite L300D notebook BugLink: http://bugs.launchpad.net/bugs/826677 Signed-off-by: Thomas Reim Acked-by: Chris Routh Cc: Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 7f65940..4f0c1ec 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -466,6 +466,16 @@ static bool radeon_connector_needs_extended_probe(struct radeon_device *dev, (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) return true; } + /* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100 + * (RS690M) sends data to i2c bus for a HDMI connector that + * is not implemented */ + if ((dev->pdev->device == 0x791f) && + (dev->pdev->subsystem_vendor == 0x1179) && + (dev->pdev->subsystem_device == 0xff68)) { + if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) && + (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) + return true; + } /* Default: no EDID header probe required for DDC probing */ return false; -- cgit v0.10.2 From 69dd3d8e29e294caaf63eb5e8a72d250279f9e5f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 23 Aug 2011 10:36:51 -0700 Subject: Revert "irq: Always set IRQF_ONESHOT if no primary handler is specified" This reverts commit f3637a5f2e2eb391ff5757bc83fb5de8f9726464. It turns out that this breaks several drivers, one example being OMAP boards which use the on-board OMAP UARTs and the omap-serial driver that will not boot to userspace after the commit.
Paul Walmsley reports that enabling CONFIG_DEBUG_SHIRQ reveals 'IRQ handler type mismatch' errors: IRQ handler type mismatch for IRQ 74 current handler: serial idle ... and the reason is that setting IRQF_ONESHOT will now result in those interrupt handlers having different IRQF flags, and thus being unsharable. So the commit log in the reverted commit: "Since it is required for those users and there is no difference for others it makes sense to add this flag unconditionally." is simply not true: there may not be any difference from a "actions at irq time", but there is a *big* difference wrt this flag testing irq management (see __setup_irq() in kernel/irq/manage.c). One solution may be to stop verifying IRQF_ONESHOT in __setup_irq(), but right now the safe course of action is to revert the change. Let's revisit this in a later merge window. Reported-by: Paul Walmsley Cc: Sebastian Andrzej Siewior Requested-by: Alan Cox Acked-by: Thomas Gleixner Signed-off-by: Linus Torvalds diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 2e94258..9b956fa 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1331,7 +1331,6 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, if (!thread_fn) return -EINVAL; handler = irq_default_primary_handler; - irqflags |= IRQF_ONESHOT; } action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); -- cgit v0.10.2 From 4b723a471050a8b80f7fa86e76f01f4c711b3443 Mon Sep 17 00:00:00 2001 From: srinidhi kasagar Date: Tue, 9 Aug 2011 20:17:22 +0200 Subject: i2c-nomadik: Do not use _interruptible_ variant call If there is a signal pending and wait_for_completion_interruptible_timeout exited because of the -ERESTARTSYS error we are unable to send any more i2c messages. So, deprecate this _interruptible_ variant call. Signed-off-by: Srinidhi Kasagar Signed-off-by: Linus Walleij Signed-off-by: Ben Dooks diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 0c731ca..f9b8854 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -417,12 +417,12 @@ static int read_i2c(struct nmk_i2c_dev *dev) writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, dev->virtbase + I2C_IMSCR); - timeout = wait_for_completion_interruptible_timeout( + timeout = wait_for_completion_timeout( &dev->xfer_complete, dev->adap.timeout); if (timeout < 0) { dev_err(&dev->pdev->dev, - "wait_for_completion_interruptible_timeout" + "wait_for_completion_timeout" "returned %d waiting for event\n", timeout); status = timeout; } @@ -504,12 +504,12 @@ static int write_i2c(struct nmk_i2c_dev *dev) writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask, dev->virtbase + I2C_IMSCR); - timeout = wait_for_completion_interruptible_timeout( + timeout = wait_for_completion_timeout( &dev->xfer_complete, dev->adap.timeout); if (timeout < 0) { dev_err(&dev->pdev->dev, - "wait_for_completion_interruptible_timeout" + "wait_for_completion_timeout" "returned %d waiting for event\n", timeout); status = timeout; } -- cgit v0.10.2 From 584b408d37af4e0b38ad5b60f236381bcdf396bc Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Thu, 4 Aug 2011 07:53:02 -0700 Subject: Revert "i2c-omap: fix static suspend vs. runtime suspend" This reverts commit adf6e07922255937c8bfeea777d19502b4c9a2be. Remove system PM methods which can race with runtime PM methods. Also, as of v3.1, the PM domain level code for OMAP handles device power state transistions automatically for devices, so drivers no longer need to specifically call the bus/pm_domain methods themselves. 
Signed-off-by: Kevin Hilman Signed-off-by: Ben Dooks diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1a766cf..2dfb631 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1139,41 +1139,12 @@ omap_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_SUSPEND -static int omap_i2c_suspend(struct device *dev) -{ - if (!pm_runtime_suspended(dev)) - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) - dev->bus->pm->runtime_suspend(dev); - - return 0; -} - -static int omap_i2c_resume(struct device *dev) -{ - if (!pm_runtime_suspended(dev)) - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) - dev->bus->pm->runtime_resume(dev); - - return 0; -} - -static struct dev_pm_ops omap_i2c_pm_ops = { - .suspend = omap_i2c_suspend, - .resume = omap_i2c_resume, -}; -#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) -#else -#define OMAP_I2C_PM_OPS NULL -#endif - static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, .remove = omap_i2c_remove, .driver = { .name = "omap_i2c", .owner = THIS_MODULE, - .pm = OMAP_I2C_PM_OPS, }, }; -- cgit v0.10.2 From ba8f318471f66d5d5b79da68112525cf432b2b18 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Thu, 18 Aug 2011 09:37:02 +0100 Subject: m68k: fix __page_to_pfn for a const struct page argument Fixes fallout due to the removal of the cast in commit aa462abe8aaf ("mm: fix __page_to_pfn for a const struct page argument") Signed-off-by: Ian Campbell Cc: Andrew Morton Acked-by: Geert Uytterhoeven Cc: linux-m68k@lists.linux-m68k.org Signed-off-by: Linus Torvalds diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h index 31d5570..89f2014 100644 --- a/arch/m68k/include/asm/page_mm.h +++ b/arch/m68k/include/asm/page_mm.h @@ -162,7 +162,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void) pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ }) #define page_to_pfn(_page) ({ \ - struct page *__p = (_page); \ + const struct page *__p = (_page); \ struct pglist_data *pgdat; \ pgdat = &pg_data_map[page_to_nid(__p)]; \ ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ -- cgit v0.10.2 From 7ca0758cdb7c241cb4e0490a8d95f0eb5b861daf Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 22 Aug 2011 13:27:06 -0700 Subject: x86-32, vdso: On system call restart after SYSENTER, use int $0x80 When we enter a 32-bit system call via SYSENTER or SYSCALL, we shuffle the arguments to match the int $0x80 calling convention. This was probably a design mistake, but it's what it is now. This causes errors if the system call has to be restarted. For SYSENTER, we have to invoke the instruction from the vdso as the return address is hardcoded. Accordingly, we can simply replace the jump in the vdso with an int $0x80 instruction and use the slower entry point for a post-restart. Suggested-by: Linus Torvalds Signed-off-by: H. Peter Anvin Link: http://lkml.kernel.org/r/CA%2B55aFztZ=r5wa0x26KJQxvZOaQq8s2v3u50wCyJcA-Sc4g8gQ@mail.gmail.com Cc: diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S index e2800af..e354bce 100644 --- a/arch/x86/vdso/vdso32/sysenter.S +++ b/arch/x86/vdso/vdso32/sysenter.S @@ -43,7 +43,7 @@ __kernel_vsyscall: .space 7,0x90 /* 14: System call restart point is here! (SYSENTER_RETURN-2) */ - jmp .Lenter_kernel + int $0x80 /* 16: System call normal return point is here!
*/ VDSO32_SYSENTER_RETURN: /* Symbol used by sysenter.c via vdso32-syms.h */ pop %ebp -- cgit v0.10.2 From b4cb0d4da745bc1d806b9b4a27cc4ce1f7adbf99 Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Tue, 23 Aug 2011 21:04:28 -0700 Subject: hwmon: (i5k_amb) Drop i5k_channel_pci_id Function i5k_channel_pci_id looks like it can fail, while a better code design would make it more obvious that it can't. We can even get rid of the function. Signed-off-by: Jean Delvare Acked-by: Darrick J. Wong Signed-off-by: Guenter Roeck diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index c4c40be..d22f241 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c @@ -114,7 +114,6 @@ struct i5k_amb_data { void __iomem *amb_mmio; struct i5k_device_attribute *attrs; unsigned int num_attrs; - unsigned long chipset_id; }; static ssize_t show_name(struct device *dev, struct device_attribute *devattr, @@ -444,8 +443,6 @@ static int __devinit i5k_find_amb_registers(struct i5k_amb_data *data, goto out; } - data->chipset_id = devid; - res = 0; out: pci_dev_put(pcidev); @@ -478,23 +475,13 @@ out: return res; } -static unsigned long i5k_channel_pci_id(struct i5k_amb_data *data, - unsigned long channel) -{ - switch (data->chipset_id) { - case PCI_DEVICE_ID_INTEL_5000_ERR: - return PCI_DEVICE_ID_INTEL_5000_FBD0 + channel; - case PCI_DEVICE_ID_INTEL_5400_ERR: - return PCI_DEVICE_ID_INTEL_5400_FBD0 + channel; - default: - BUG(); - } -} - -static unsigned long chipset_ids[] = { - PCI_DEVICE_ID_INTEL_5000_ERR, - PCI_DEVICE_ID_INTEL_5400_ERR, - 0 +static struct { + unsigned long err; + unsigned long fbd0; +} chipset_ids[] __devinitdata = { + { PCI_DEVICE_ID_INTEL_5000_ERR, PCI_DEVICE_ID_INTEL_5000_FBD0 }, + { PCI_DEVICE_ID_INTEL_5400_ERR, PCI_DEVICE_ID_INTEL_5400_FBD0 }, + { 0, 0 } }; #ifdef MODULE @@ -510,8 +497,7 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) { struct i5k_amb_data *data; struct resource *reso; - int i; - int res = -ENODEV; + int i, res; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) @@ -520,22 +506,22 @@ static int __devinit i5k_amb_probe(struct platform_device *pdev) /* Figure out where the AMB registers live */ i = 0; do { - res = i5k_find_amb_registers(data, chipset_ids[i]); + res = i5k_find_amb_registers(data, chipset_ids[i].err); + if (res == 0) + break; i++; - } while (res && chipset_ids[i]); + } while (chipset_ids[i].err); if (res) goto err; /* Copy the DIMM presence map for the first two channels */ - res = i5k_channel_probe(&data->amb_present[0], - i5k_channel_pci_id(data, 0)); + res = i5k_channel_probe(&data->amb_present[0], chipset_ids[i].fbd0); if (res) goto err; /* Copy the DIMM presence map for the optional second two channels */ - i5k_channel_probe(&data->amb_present[2], - i5k_channel_pci_id(data, 1)); + i5k_channel_probe(&data->amb_present[2], chipset_ids[i].fbd0 + 1); /* Set up resource regions */ reso = request_mem_region(data->amb_base, data->amb_len, DRVNAME); -- cgit v0.10.2 From c2183d1e9b3f313dd8ba2b1b0197c8d9fb86a7ae Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 24 Aug 2011 10:20:17 +0200 Subject: fuse: check size of FUSE_NOTIFY_INVAL_ENTRY message FUSE_NOTIFY_INVAL_ENTRY didn't check the length of the write so the message processing could overrun and result in a "kernel BUG at fs/fuse/dev.c:629!" 
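The same validation pattern, written out as a small self-contained sketch (simplified stand-ins, not the fuse code itself; the struct mirrors the layout of fuse_notify_inval_entry_out and the constant stands in for FUSE_NAME_MAX):

#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_NAME_MAX 1024	/* stand-in for FUSE_NAME_MAX */

struct inval_entry_out {	/* mirrors fuse_notify_inval_entry_out */
	uint64_t parent;
	uint32_t namelen;
	uint32_t padding;
};

/* Accept the message only when it holds exactly header + name + NUL. */
static int check_inval_entry(const void *buf, size_t size)
{
	const struct inval_entry_out *out = buf;

	if (size < sizeof(*out))
		return -1;
	if (out->namelen > EXAMPLE_NAME_MAX)
		return -1;
	if (size != sizeof(*out) + out->namelen + 1)
		return -1;	/* the overrun the patch closes */
	return 0;
}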
Reported-by: Han-Wen Nienhuys Signed-off-by: Miklos Szeredi CC: stable@kernel.org diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 640fc22..168a80f 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1358,6 +1358,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size, if (outarg.namelen > FUSE_NAME_MAX) goto err; + err = -EINVAL; + if (size != sizeof(outarg) + outarg.namelen + 1) + goto err; + name.name = buf; name.len = outarg.namelen; err = fuse_copy_one(cs, buf, outarg.namelen + 1); -- cgit v0.10.2 From c8d47631a48f254d062db8084776d1fb24785e7b Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Tue, 9 Aug 2011 20:17:29 +0200 Subject: i2c-nomadik: fix kerneldoc warning There was a missing struct item in the kerneldoc, add it and fix another pretty-printing formatting issue with a missing space. Signed-off-by: Linus Walleij Signed-off-by: Ben Dooks diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index f9b8854..b228e09 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -146,6 +146,7 @@ struct i2c_nmk_client { * @stop: stop condition * @xfer_complete: acknowledge completion for a I2C message * @result: controller propogated result + * @regulator: pointer to i2c regulator * @busy: Busy doing transfer */ struct nmk_i2c_dev { @@ -509,7 +510,7 @@ static int write_i2c(struct nmk_i2c_dev *dev) if (timeout < 0) { dev_err(&dev->pdev->dev, - "wait_for_completion_timeout" + "wait_for_completion_timeout " "returned %d waiting for event\n", timeout); status = timeout; } -- cgit v0.10.2 From caca9510ff4e5d842c0589110243d60927836222 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 24 Aug 2011 15:55:30 -0700 Subject: firmware loader: allow builtin firmware load even if usermodehelper is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In commit a144c6a6c924 ("PM: Print a warning if firmware is requested when tasks are frozen") we not only printed a warning if somebody tried to load the firmware when tasks are frozen - we also failed the load. But that check was done before the check for built-in firmware, and then when we disallowed usermode helpers during bootup (commit 288d5abec831: "Boot up with usermodehelper disabled"), that actually means that built-in modules can no longer load their firmware even if the firmware is built in too. Which used to work, and some people depended on it for the R100 driver. So move the test for usermodehelper_is_disabled() down, to after checking the built-in firmware. 
This should fix: https://bugzilla.kernel.org/show_bug.cgi?id=40952 Reported-by: James Cloos Bisected-by: Elimar Riesebieter Cc: Michel Dänzer Cc: Rafael Wysocki Cc: Greg Kroah-Hartman Cc: Valdis Kletnieks Signed-off-by: Linus Torvalds diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index bbb03e6..06ed6b4 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -521,11 +521,6 @@ static int _request_firmware(const struct firmware **firmware_p, if (!firmware_p) return -EINVAL; - if (WARN_ON(usermodehelper_is_disabled())) { - dev_err(device, "firmware: %s will not be loaded\n", name); - return -EBUSY; - } - *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", @@ -539,6 +534,12 @@ static int _request_firmware(const struct firmware **firmware_p, return 0; } + if (WARN_ON(usermodehelper_is_disabled())) { + dev_err(device, "firmware: %s will not be loaded\n", name); + retval = -EBUSY; + goto out; + } + if (uevent) dev_dbg(device, "firmware: requesting %s\n", name); -- cgit v0.10.2 From be27425dcc516fd08245b047ea57f83b8f6f0903 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 19 Aug 2011 16:15:10 -0700 Subject: Add a personality to report 2.6.x version numbers I ran into a couple of programs which broke with the new Linux 3.0 version. Some of those were binary only. I tried to use LD_PRELOAD to work around it, but it was quite difficult and in one case impossible because of a mix of 32bit and 64bit executables. For example, all kinds of management software from HP doesn't work, unless we pretend to run a 2.6 kernel. $ uname -a Linux svivoipvnx001 3.0.0-08107-g97cd98f #1062 SMP Fri Aug 12 18:11:45 CEST 2011 i686 i686 i386 GNU/Linux $ hpacucli ctrl all show Error: No controllers detected. $ rpm -qf /usr/sbin/hpacucli hpacucli-8.75-12.0 Another notable case is that Python now reports "linux3" from sys.platform(); which in turn can break things that were checking sys.platform() == "linux2": https://bugzilla.mozilla.org/show_bug.cgi?id=664564 It seems pretty clear to me though it's a bug in the apps that are using '==' instead of .startswith(), but this allows us to unbreak broken programs. This patch adds a UNAME26 personality that makes the kernel report a 2.6.40+x version number instead. The x is the x in 3.x. I know this is somewhat ugly, but I didn't find a better workaround, and compatibility to existing programs is important. Some programs also read /proc/sys/kernel/osrelease. This can be worked around in user space with mount --bind (and a mount namespace) To use: wget ftp://ftp.kernel.org/pub/linux/kernel/people/ak/uname26/uname26.c gcc -o uname26 uname26.c ./uname26 program Signed-off-by: Andi Kleen Signed-off-by: Linus Torvalds diff --git a/include/linux/personality.h b/include/linux/personality.h index eec3bae..8fc7dd1a 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h @@ -22,6 +22,7 @@ extern int __set_personality(unsigned int); * These occupy the top three bytes. */ enum { + UNAME26 = 0x0020000, ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors * (signal handling) diff --git a/kernel/sys.c b/kernel/sys.c index dd948a1..18ee1d2 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include #include @@ -44,6 +46,8 @@ #include #include +/* Move somewhere else to avoid recompiling?
*/ +#include #include #include @@ -1161,6 +1165,34 @@ DECLARE_RWSEM(uts_sem); #define override_architecture(name) 0 #endif +/* + * Work around broken programs that cannot handle "Linux 3.0". + * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 + */ +static int override_release(char __user *release, int len) +{ + int ret = 0; + char buf[len]; + + if (current->personality & UNAME26) { + char *rest = UTS_RELEASE; + int ndots = 0; + unsigned v; + + while (*rest) { + if (*rest == '.' && ++ndots >= 3) + break; + if (!isdigit(*rest) && *rest != '.') + break; + rest++; + } + v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; + snprintf(buf, len, "2.6.%u%s", v, rest); + ret = copy_to_user(release, buf, len); + } + return ret; +} + SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) { int errno = 0; @@ -1170,6 +1202,8 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) errno = -EFAULT; up_read(&uts_sem); + if (!errno && override_release(name->release, sizeof(name->release))) + errno = -EFAULT; if (!errno && override_architecture(name)) errno = -EFAULT; return errno; @@ -1191,6 +1225,8 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) error = -EFAULT; up_read(&uts_sem); + if (!error && override_release(name->release, sizeof(name->release))) + error = -EFAULT; if (!error && override_architecture(name)) error = -EFAULT; return error; @@ -1225,6 +1261,8 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) if (!error && override_architecture(name)) error = -EFAULT; + if (!error && override_release(name->release, sizeof(name->release))) + error = -EFAULT; return error ? -EFAULT : 0; } #endif -- cgit v0.10.2 From e096d0c7e2e4e5893792db865dd065ac73cf1f00 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Thu, 25 Aug 2011 07:48:12 -0400 Subject: lockdep: Add helper function for dir vs file i_mutex annotation Purely in-memory filesystems do not use the inode hash as the dcache tells us if an entry already exists. As a result, they do not call unlock_new_inode, and thus directory inodes do not get put into a different lockdep class for i_sem. We need the different lockdep classes, because the locking order for i_mutex is different for directory inodes and regular inodes. Directory inodes can do "readdir()", which takes i_mutex *before* possibly taking mm->mmap_sem (due to a page fault while copying the directory entry to user space). In contrast, regular inodes can be mmap'ed, which takes mm->mmap_sem before accessing i_mutex. The two cases can never happen for the same inode, so no real deadlock can occur, but without the different lockdep classes, lockdep cannot understand that. As a result, if CONFIG_DEBUG_LOCK_ALLOC is set, this can lead to false positives from lockdep like below: find/645 is trying to acquire lock: (&mm->mmap_sem){++++++}, at: [] might_fault+0x5c/0xac but task is already holding lock: (&sb->s_type->i_mutex_key#15){+.+.+.}, at: [] vfs_readdir+0x5b/0xb4 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (&sb->s_type->i_mutex_key#15){+.+.+.}: [] lock_acquire+0xbf/0x103 [] __mutex_lock_common+0x4c/0x361 [] mutex_lock_nested+0x40/0x45 [] hugetlbfs_file_mmap+0x82/0x110 [] mmap_region+0x258/0x432 [] do_mmap_pgoff+0x2ac/0x306 [] sys_mmap_pgoff+0x118/0x16a [] sys_mmap+0x22/0x24 [] system_call_fastpath+0x16/0x1b -> #0 (&mm->mmap_sem){++++++}: [] __lock_acquire+0xa1a/0xcf7 [] lock_acquire+0xbf/0x103 [] might_fault+0x89/0xac [] filldir+0x6f/0xc7 [] dcache_readdir+0x67/0x205 [] vfs_readdir+0x7b/0xb4 [] sys_getdents+0x7e/0xd1 [] system_call_fastpath+0x16/0x1b This patch moves the directory vs file lockdep annotation into a helper function that can be called by in-memory filesystems and has hugetlbfs call it. Signed-off-by: Josh Boyer Acked-by: Peter Zijlstra Signed-off-by: Linus Torvalds diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 87b6e04..ec88953 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -491,6 +491,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid, inode->i_op = &page_symlink_inode_operations; break; } + lockdep_annotate_inode_mutex_key(inode); } return inode; } diff --git a/fs/inode.c b/fs/inode.c index 73920d5..ec79246 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -848,16 +848,9 @@ struct inode *new_inode(struct super_block *sb) } EXPORT_SYMBOL(new_inode); -/** - * unlock_new_inode - clear the I_NEW state and wake up any waiters - * @inode: new inode to unlock - * - * Called when the inode is fully initialised to clear the new state of the - * inode and wake up anyone waiting for the inode to finish initialisation. - */ -void unlock_new_inode(struct inode *inode) -{ #ifdef CONFIG_DEBUG_LOCK_ALLOC +void lockdep_annotate_inode_mutex_key(struct inode *inode) +{ if (S_ISDIR(inode->i_mode)) { struct file_system_type *type = inode->i_sb->s_type; @@ -873,7 +866,20 @@ void unlock_new_inode(struct inode *inode) &type->i_mutex_dir_key); } } +} +EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); #endif + +/** + * unlock_new_inode - clear the I_NEW state and wake up any waiters + * @inode: new inode to unlock + * + * Called when the inode is fully initialised to clear the new state of the + * inode and wake up anyone waiting for the inode to finish initialisation. 
+ */ +void unlock_new_inode(struct inode *inode) +{ + lockdep_annotate_inode_mutex_key(inode); spin_lock(&inode->i_lock); WARN_ON(!(inode->i_state & I_NEW)); inode->i_state &= ~I_NEW; diff --git a/include/linux/fs.h b/include/linux/fs.h index 178cdb4..c2bd68f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2318,6 +2318,11 @@ extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*te extern struct inode * iget_locked(struct super_block *, unsigned long); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void lockdep_annotate_inode_mutex_key(struct inode *inode); +#else +static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; +#endif extern void unlock_new_inode(struct inode *); extern unsigned int get_next_ino(void); -- cgit v0.10.2 From cbbfa38fcb95930babc5233cf6927ec430f38abc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 25 Aug 2011 19:46:56 +0200 Subject: mtrr: fix UP breakage caused during switch to stop_machine While removing custom rendezvous code and switching to stop_machine, commit 192d8857427d ("x86, mtrr: use stop_machine APIs for doing MTRR rendezvous") completely dropped mtrr setting code on !CONFIG_SMP breaking MTRR setting on UP. Fix it by removing the incorrect CONFIG_SMP. Signed-off-by: Tejun Heo Reported-by: Anders Eriksson Tested-and-acked-by: Suresh Siddha Acked-by: H. Peter Anvin Signed-off-by: Linus Torvalds diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 08119a3..6b96110 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -149,7 +149,6 @@ struct set_mtrr_data { */ static int mtrr_rendezvous_handler(void *info) { -#ifdef CONFIG_SMP struct set_mtrr_data *data = info; /* @@ -171,7 +170,6 @@ static int mtrr_rendezvous_handler(void *info) } else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) { mtrr_if->set_all(); } -#endif return 0; } -- cgit v0.10.2 From b4ca46e4e82a0a5976fe5eab85be585d75f8202f Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 25 Aug 2011 16:10:33 -0400 Subject: x86-32: Fix boot with CONFIG_X86_INVD_BUG entry_32.S contained a hardcoded alternative instruction entry, and the format changed in commit 59e97e4d6fbc ("x86: Make alternative instruction pointers relative"). Replace the hardcoded entry with the altinstruction_entry macro. This fixes the 32-bit boot with CONFIG_X86_INVD_BUG=y. Reported-and-tested-by: Arnaud Lacombe Signed-off-by: Andy Lutomirski Cc: Peter Anvin Cc: Ingo Molnar Signed-off-by: Linus Torvalds diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 5c1a9197..f3f6f53 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -54,6 +54,7 @@ #include #include #include +#include /* Avoid __ASSEMBLER__'ifying just for this.
*/ #include @@ -873,12 +874,7 @@ ENTRY(simd_coprocessor_error) 661: pushl_cfi $do_general_protection 662: .section .altinstructions,"a" - .balign 4 - .long 661b - .long 663f - .word X86_FEATURE_XMM - .byte 662b-661b - .byte 664f-663f + altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f .previous .section .altinstr_replacement,"ax" 663: pushl $do_simd_coprocessor_error -- cgit v0.10.2 From 06ed4625fdfffee1251708cd30de276186d5fdcf Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Thu, 25 Aug 2011 15:59:01 -0700 Subject: drivers/misc/pti.c: add missing includes Found on allmodconfig build (ARCH=alpha) drivers/misc/pti.c: In function 'get_id': drivers/misc/pti.c:249: error: implicit declaration of function 'kmalloc' drivers/misc/pti.c: In function 'pti_char_write': drivers/misc/pti.c:658: error: implicit declaration of function 'copy_from_user' Signed-off-by: Sergei Trofimovich Cc: Greg Kroah-Hartman Cc: J Freyensee Cc: Jeremy Rocher Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/misc/pti.c b/drivers/misc/pti.c index 8653bd0..06df187 100644 --- a/drivers/misc/pti.c +++ b/drivers/misc/pti.c @@ -33,6 +33,8 @@ #include #include #include +#include +#include #define DRIVERNAME "pti" #define PCINAME "pciPTI" -- cgit v0.10.2 From 2df7a7d1cd07626dd235ca102830ebfc6c01a09e Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Thu, 25 Aug 2011 15:59:02 -0700 Subject: alpha: unbreak osf_setsysinfo(SSI_NVPAIRS, [SSIN_UACPROC, UAC_SIGBUS]) The bug was accidentally found by the following program: #include #include #include static int setsysinfo(unsigned long op, void *buffer, unsigned long size, int *start, void *arg, unsigned long flag) { return syscall(__NR_osf_setsysinfo, op, buffer, size, start, arg, flag); } int main(int argc, char **argv) { short x[10]; unsigned int buf[2] = { SSIN_UACPROC, UAC_SIGBUS, }; setsysinfo(SSI_NVPAIRS, buf, 1, 0, 0, 0); int *y = (int*) (x+1); *y = 0; return 0; } The program should fail on SIGBUS, but didn't. The patch is a second part of userspace flag fix (commit 745dd2405e28 "Alpha: Rearrange thread info flags fixing two regressions"). Deleted outdated out-of-sync 'UAC_SHIFT' (the cause of bug) in favour of 'ALPHA_UAC_SHIFT'. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Sergei Trofimovich Acked-by: Michael Cree Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/alpha/include/asm/sysinfo.h b/arch/alpha/include/asm/sysinfo.h index 086aba2..e77d77c 100644 --- a/arch/alpha/include/asm/sysinfo.h +++ b/arch/alpha/include/asm/sysinfo.h @@ -27,13 +27,4 @@ #define UAC_NOFIX 2 #define UAC_SIGBUS 4 - -#ifdef __KERNEL__ - -/* This is the shift that is applied to the UAC bits as stored in the - per-thread flags. See thread_info.h. */ -#define UAC_SHIFT 6 - -#endif - #endif /* __ASM_ALPHA_SYSINFO_H */ diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 6f32f9c..ff73db0 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -74,9 +74,9 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_POLLING_NRFLAG 8 /* poll_idle is polling NEED_RESCHED */ #define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ -#define TIF_UAC_NOPRINT 10 /* see sysinfo.h */ -#define TIF_UAC_NOFIX 11 -#define TIF_UAC_SIGBUS 12 +#define TIF_UAC_NOPRINT 10 /* !
Preserve sequence of following */ +#define TIF_UAC_NOFIX 11 /* ! flags as they match */ +#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */ #define TIF_MEMDIE 13 /* is terminating due to OOM killer */ #define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */ #define TIF_FREEZE 16 /* is freezing for suspend */ @@ -97,7 +97,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define _TIF_ALLWORK_MASK (_TIF_WORK_MASK \ | _TIF_SYSCALL_TRACE) -#define ALPHA_UAC_SHIFT 10 +#define ALPHA_UAC_SHIFT TIF_UAC_NOPRINT #define ALPHA_UAC_MASK (1 << TIF_UAC_NOPRINT | 1 << TIF_UAC_NOFIX | \ 1 << TIF_UAC_SIGBUS) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 326f0a2..01e8715 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -633,9 +634,10 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer, case GSI_UACPROC: if (nbytes < sizeof(unsigned int)) return -EINVAL; - w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK; - if (put_user(w, (unsigned int __user *)buffer)) - return -EFAULT; + w = (current_thread_info()->flags >> ALPHA_UAC_SHIFT) & + UAC_BITMASK; + if (put_user(w, (unsigned int __user *)buffer)) + return -EFAULT; return 1; case GSI_PROC_TYPE: @@ -756,8 +758,8 @@ SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer, case SSIN_UACPROC: again: old = current_thread_info()->flags; - new = old & ~(UAC_BITMASK << UAC_SHIFT); - new = new | (w & UAC_BITMASK) << UAC_SHIFT; + new = old & ~(UAC_BITMASK << ALPHA_UAC_SHIFT); + new = new | (w & UAC_BITMASK) << ALPHA_UAC_SHIFT; if (cmpxchg(¤t_thread_info()->flags, old, new) != old) goto again; -- cgit v0.10.2 From 58299449257566613f58dcfb757f0ba4a377987a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 25 Aug 2011 15:59:04 -0700 Subject: w1: fix for loop in w1_f29_remove_slave() The for loop was looking for i <= 0 instead of i >= 0 so this function never did anything. Also we started with i = NB_SYSFS_BIN_FILES instead of "NB_SYSFS_BIN_FILES - 1" which is an off by one bug. 
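A tiny standalone demonstration of the two loop bounds (illustrative value for the constant, not taken from the driver):

#include <stdio.h>

#define NB_SYSFS_BIN_FILES 2	/* assumed value, for demonstration */

int main(void)
{
	int i;

	/* broken form: the condition is false immediately, body never runs */
	for (i = NB_SYSFS_BIN_FILES; i <= 0; --i)
		printf("never reached\n");

	/* fixed form: visits 1, then 0, exactly the valid array indices */
	for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
		printf("removing file %d\n", i);

	return 0;
}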
Reported-by: Bojan Prtvar Signed-off-by: Dan Carpenter Acked-by: Jean-François Dagenais Cc: Evgeniy Polyakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c index c377818..7c8cdb8 100644 --- a/drivers/w1/slaves/w1_ds2408.c +++ b/drivers/w1/slaves/w1_ds2408.c @@ -373,7 +373,7 @@ static int w1_f29_add_slave(struct w1_slave *sl) static void w1_f29_remove_slave(struct w1_slave *sl) { int i; - for (i = NB_SYSFS_BIN_FILES; i <= 0; --i) + for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i) sysfs_remove_bin_file(&sl->dev.kobj, &(w1_f29_sysfs_bin_files[i])); } -- cgit v0.10.2 From a801876638c5ce650223476c4eb8f37cea32dc1c Mon Sep 17 00:00:00 2001 From: Evgeniy Polyakov Date: Thu, 25 Aug 2011 15:59:06 -0700 Subject: MAINTAINERS: Evgeniy has moved Signed-off-by: Evgeniy Polyakov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/MAINTAINERS b/MAINTAINERS index 069ee3b..ee0ac2c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7087,7 +7087,7 @@ S: Supported F: drivers/mmc/host/vub300.c W1 DALLAS'S 1-WIRE BUS -M: Evgeniy Polyakov +M: Evgeniy Polyakov S: Maintained F: Documentation/w1/ F: drivers/w1/ diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index 02bf7bf..b5abaae 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c @@ -1,7 +1,7 @@ /* * dscore.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify @@ -1024,5 +1024,5 @@ module_init(ds_init); module_exit(ds_fini); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_AUTHOR("Evgeniy Polyakov "); MODULE_DESCRIPTION("DS2490 USB <-> W1 bus master driver (DS9490*)"); diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c index 334d1cc..f667c26 100644 --- a/drivers/w1/masters/matrox_w1.c +++ b/drivers/w1/masters/matrox_w1.c @@ -1,7 +1,7 @@ /* * matrox_w1.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify @@ -39,7 +39,7 @@ #include "../w1_log.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_AUTHOR("Evgeniy Polyakov "); MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); static struct pci_device_id matrox_w1_tbl[] = { diff --git a/drivers/w1/slaves/w1_smem.c b/drivers/w1/slaves/w1_smem.c index cc8c02e..8465562 100644 --- a/drivers/w1/slaves/w1_smem.c +++ b/drivers/w1/slaves/w1_smem.c @@ -1,7 +1,7 @@ /* * w1_smem.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify @@ -32,7 +32,7 @@ #include "../w1_family.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_AUTHOR("Evgeniy Polyakov "); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, 64bit memory family."); static struct w1_family w1_smem_family_01 = { diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c index 402928b..a1ef9b5 100644 --- a/drivers/w1/slaves/w1_therm.c +++ b/drivers/w1/slaves/w1_therm.c @@ -1,7 +1,7 @@ /* * w1_therm.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify @@ -34,7 +34,7 @@ #include "../w1_family.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov ");
+MODULE_AUTHOR("Evgeniy Polyakov "); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol, temperature family."); /* Allow the strong pullup to be disabled, but default to enabled. diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 6c136c1..c374978 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -1,7 +1,7 @@ /* * w1.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify @@ -42,7 +42,7 @@ #include "w1_netlink.h" MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_AUTHOR("Evgeniy Polyakov "); MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); static int w1_timeout = 10; diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 1ce23fc..4d012ca 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h @@ -1,7 +1,7 @@ /* * w1.h * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_family.c b/drivers/w1/w1_family.c index 4a09904..6335979 100644 --- a/drivers/w1/w1_family.c +++ b/drivers/w1/w1_family.c @@ -1,7 +1,7 @@ /* * w1_family.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index 98a1ac0..490cda2 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h @@ -1,7 +1,7 @@ /* * w1_family.h * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c index b50be3f..d220bce 100644 --- a/drivers/w1/w1_int.c +++ b/drivers/w1/w1_int.c @@ -1,7 +1,7 @@ /* * w1_int.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h index 4274082..2ad7d44 100644 --- a/drivers/w1/w1_int.h +++ b/drivers/w1/w1_int.h @@ -1,7 +1,7 @@ /* * w1_int.h * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 8e8b64c..765b37b 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c @@ -1,7 +1,7 @@ /* * w1_io.c * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h index e6ab7cf..9c7bd62 100644 --- a/drivers/w1/w1_log.h +++ b/drivers/w1/w1_log.h @@ -1,7 +1,7 @@ /* * w1_log.h * - * Copyright (c) 2004 Evgeniy Polyakov + * Copyright (c) 2004 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 55aabd9..40788c9 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c @@ -1,7 +1,7 @@ /* * w1_netlink.c * - * Copyright (c) 2003 Evgeniy Polyakov + * Copyright (c) 2003 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 27e950f..b0922dc 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h @@ -1,7 +1,7 @@ /* * w1_netlink.h * - * Copyright (c) 
2003 Evgeniy Polyakov + * Copyright (c) 2003 Evgeniy Polyakov * * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/connector.h b/include/linux/connector.h index 0c69ad8..3c9c54f 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -1,7 +1,7 @@ /* * connector.h * - * 2004-2005 Copyright (c) Evgeniy Polyakov + * 2004-2005 Copyright (c) Evgeniy Polyakov * All rights reserved. * * This program is free software; you can redistribute it and/or modify -- cgit v0.10.2 From 5af12d0efdbd9967cc71a0a10c4025c4255a6254 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 25 Aug 2011 15:59:07 -0700 Subject: memcg: pin execution to current cpu while draining stock Commit d1a05b6973c7 ("memcg do not try to drain per-cpu caches without pages") added a drain_local_stock() call to a preemptible section. The draining task looks up the cpu-local stock twice to set the draining-flag, then to drain the stock and clear the flag again. If the task is migrated to a different CPU in between, noone will clear the flag on the first stock and it will be forever undrainable. Its charge can not be recovered and the cgroup can not be deleted anymore. Properly pin the task to the executing CPU while draining stocks. Signed-off-by: Johannes Weiner Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 930de94..0e40f02 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2169,13 +2169,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) /* Notify other cpus that system-wide "drain" is running */ get_online_cpus(); - /* - * Get a hint for avoiding draining charges on the current cpu, - * which must be exhausted by our charging. It is not required that - * this be a precise check, so we use raw_smp_processor_id() instead of - * getcpu()/putcpu(). - */ - curcpu = raw_smp_processor_id(); + curcpu = get_cpu(); for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *mem; @@ -2192,6 +2186,7 @@ static void drain_all_stock(struct mem_cgroup *root_mem, bool sync) schedule_work_on(cpu, &stock->work); } } + put_cpu(); if (!sync) goto out; -- cgit v0.10.2 From 3d1c2f72a9464c9880054194af0c041d7beb9124 Mon Sep 17 00:00:00 2001 From: Ralf Thielow Date: Thu, 25 Aug 2011 15:59:07 -0700 Subject: scripts/get_maintainer.pl: update Linus's git repository Change to new git tree - (git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git). Signed-off-by: Ralf Thielow Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl index eb2f1e6..4594f33 100755 --- a/scripts/get_maintainer.pl +++ b/scripts/get_maintainer.pl @@ -1389,7 +1389,7 @@ sub vcs_exists { warn("$P: No supported VCS found. Add --nogit to options?\n"); warn("Using a git repository produces better results.\n"); warn("Try Linus Torvalds' latest git repository using:\n"); - warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git\n"); + warn("git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git\n"); $printed_novcs = 1; } return 0; -- cgit v0.10.2 From 30ecad51849ae132dc6ef6ddb62d499c7257515b Mon Sep 17 00:00:00 2001 From: Hui Zhu Date: Thu, 25 Aug 2011 15:59:08 -0700 Subject: checkpatch: add missing WARN argument for min_t and max_t tests The test for bad usage of min_t() and max_t() is missing the --ignore type. Add it. 
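For context on what the test catches: checkpatch flags min()/max() calls that open-code a cast on one argument, since min_t()/max_t() exist for exactly that purpose. A hypothetical snippet (not from any patch here) of code the warning fires on, and its preferred form:

#include <linux/kernel.h>	/* min(), min_t() */

static size_t bounded_copy_len(size_t len, int budget)
{
	/* flagged: "min() should probably be min_t(size_t, budget, len)" */
	return min((size_t)budget, len);
}

static size_t bounded_copy_len_fixed(size_t len, int budget)
{
	/* preferred: the type goes in the call, not in an open-coded cast */
	return min_t(size_t, budget, len);
}

With the message type in place, the warning can also be suppressed selectively via --ignore MINMAX, which is what the missing argument prevented.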
Signed-off-by: Hui Zhu Signed-off-by: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 9d761c9..3dfc471 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2574,7 +2574,8 @@ sub process { } else { $cast = $cast2; } - WARN("$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); + WARN("MINMAX", + "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . $herecurr); } } -- cgit v0.10.2 From 7e8aa048989bf7e0604996a3e2068fb1a81f81bd Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Thu, 25 Aug 2011 15:59:09 -0700 Subject: drivers/char/msm_smd_pkt.c: don't use IS_ERR() The various basic memory allocation function return NULL, not an ERR_PTR. The semantic patch that makes this change is available in scripts/coccinelle/null/eno.cocci. More information about semantic patching is available at http://coccinelle.lip6.fr/ Signed-off-by: Thomas Meyer Cc: Niranjana Vishwanathapura Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c index b6f8a65..8eca55d 100644 --- a/drivers/char/msm_smd_pkt.c +++ b/drivers/char/msm_smd_pkt.c @@ -379,9 +379,8 @@ static int __init smd_pkt_init(void) for (i = 0; i < NUM_SMD_PKT_PORTS; ++i) { smd_pkt_devp[i] = kzalloc(sizeof(struct smd_pkt_dev), GFP_KERNEL); - if (IS_ERR(smd_pkt_devp[i])) { - r = PTR_ERR(smd_pkt_devp[i]); - pr_err("kmalloc() failed %d\n", r); + if (!smd_pkt_devp[i]) { + pr_err("kmalloc() failed\n"); goto clean_cdevs; } -- cgit v0.10.2 From f51bdd2e97098a5cbb3cba7c3a56fa0e9ac3c444 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 25 Aug 2011 15:59:10 -0700 Subject: mm: fix a vmscan warning I get the below warning: BUG: using smp_processor_id() in preemptible [00000000] code: bash/746 caller is native_sched_clock+0x37/0x6e Pid: 746, comm: bash Tainted: G W 3.0.0+ #254 Call Trace: [] debug_smp_processor_id+0xc2/0xdc [] native_sched_clock+0x37/0x6e [] try_to_free_mem_cgroup_pages+0x7d/0x270 [] mem_cgroup_force_empty+0x24b/0x27a [] ? sys_close+0x38/0x138 [] ? sys_close+0x38/0x138 [] mem_cgroup_force_empty_write+0x17/0x19 [] cgroup_file_write+0xa8/0xba [] vfs_write+0xb3/0x138 [] sys_write+0x4a/0x71 [] ? sys_close+0xf0/0x138 [] system_call_fastpath+0x16/0x1b sched_clock() can't be used with preempt enabled. And we don't need fast approach to get clock here, so let's use ktime API. Signed-off-by: Shaohua Li Acked-by: KAMEZAWA Hiroyuki Tested-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/vmscan.c b/mm/vmscan.c index 7ef6912..22631e0 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, .mem_cgroup = mem, .memcg_record = rec, }; - unsigned long start, end; + ktime_t start, end; sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); @@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, sc.may_writepage, sc.gfp_mask); - start = sched_clock(); + start = ktime_get(); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. @@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, * the priority and make it zero. 
*/ shrink_zone(0, zone, &sc); - end = sched_clock(); + end = ktime_get(); if (rec) - rec->elapsed += end - start; + rec->elapsed += ktime_to_ns(ktime_sub(end, start)); *scanned = sc.nr_scanned; trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); @@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, { struct zonelist *zonelist; unsigned long nr_reclaimed; - unsigned long start, end; + ktime_t start, end; int nid; struct scan_control sc = { .may_writepage = !laptop_mode, @@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, .gfp_mask = sc.gfp_mask, }; - start = sched_clock(); + start = ktime_get(); /* * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't * take care of from where we get pages. So the node where we start the @@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, sc.gfp_mask); nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink); - end = sched_clock(); + end = ktime_get(); if (rec) - rec->elapsed += end - start; + rec->elapsed += ktime_to_ns(ktime_sub(end, start)); trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed); -- cgit v0.10.2 From 47331231bb997a8adb79774fc4cf4bb48fe4e00a Mon Sep 17 00:00:00 2001 From: Wanlong Gao Date: Thu, 25 Aug 2011 15:59:10 -0700 Subject: MAINTAINERS: Paul Menage has moved Paul said: I left Google at the end of last week - if it's not bouncing already, menage@google.com isn't going to work for much longer. Signed-off-by: Wanlong Gao Acked-by: Paul Menage Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/MAINTAINERS b/MAINTAINERS index ee0ac2c..d942920 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1883,7 +1883,7 @@ S: Maintained F: drivers/connector/ CONTROL GROUPS (CGROUPS) -M: Paul Menage +M: Paul Menage M: Li Zefan L: containers@lists.linux-foundation.org S: Maintained @@ -1932,7 +1932,7 @@ S: Maintained F: tools/power/cpupower CPUSETS -M: Paul Menage +M: Paul Menage W: http://www.bullopensource.org/cpuset/ W: http://oss.sgi.com/projects/cpusets/ S: Supported -- cgit v0.10.2 From 4c30c6f566c0989ddaee3407da44751e340a63ed Mon Sep 17 00:00:00 2001 From: Nishanth Aravamudan Date: Thu, 25 Aug 2011 15:59:11 -0700 Subject: kernel/printk: do not turn off bootconsole in printk_late_init() if keep_bootcon It seems that 7bf693951a8e ("console: allow to retain boot console via boot option keep_bootcon") doesn't always achieve what it aims, as when printk_late_init() runs it unconditionally turns off all boot consoles. With this patch, I am able to see more messages on the boot console in KVM guests than I can without, when keep_bootcon is specified. I think it is appropriate for the relevant -stable trees. However, it's more of an annoyance than a serious bug (ideally you don't need to keep the boot console around as console handover should be working -- I was encountering a situation where the console handover wasn't working and not having the boot console available meant I couldn't see why). Signed-off-by: Nishanth Aravamudan Cc: David S. Miller Cc: Alan Cox Cc: Greg KH Acked-by: Fabio M. 
Di Nitto Cc: [2.6.39.x, 3.0.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/kernel/printk.c b/kernel/printk.c index 836a2ae..28a40d8 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1604,7 +1604,7 @@ static int __init printk_late_init(void) struct console *con; for_each_console(con) { - if (con->flags & CON_BOOT) { + if (!keep_bootcon && con->flags & CON_BOOT) { printk(KERN_INFO "turn off boot console %s%d\n", con->name, con->index); unregister_console(con); -- cgit v0.10.2 From 439423f6894aa0dec22187526827456f5004baed Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Thu, 25 Aug 2011 15:59:12 -0700 Subject: vmscan: clear ZONE_CONGESTED for zone with good watermark ZONE_CONGESTED is only cleared in kswapd, but pages can be freed in any task. It's possible ZONE_CONGESTED isn't cleared in some cases: 1. the zone is already balanced just entering balance_pgdat() for order-0 because concurrent tasks free memory. In this case, later check will skip the zone as it's balanced so the flag isn't cleared. 2. high order balance fallbacks to order-0. quote from Mel: At the end of balance_pgdat(), kswapd uses the following logic; If reclaiming at high order { for each zone { if all_unreclaimable skip if watermark is not met order = 0 loop again /* watermark is met */ clear congested } } i.e. it clears ZONE_CONGESTED if it the zone is balanced. if not, it restarts balancing at order-0. However, if the higher zones are balanced for order-0, kswapd will miss clearing ZONE_CONGESTED as that only happens after a zone is shrunk. This can mean that wait_iff_congested() stalls unnecessarily. This patch makes kswapd clear ZONE_CONGESTED during its initial highmem->dma scan for zones that are already balanced. Signed-off-by: Shaohua Li Acked-by: Mel Gorman Reviewed-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/vmscan.c b/mm/vmscan.c index 22631e0..b7719ec 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2529,6 +2529,9 @@ loop_again: high_wmark_pages(zone), 0, 0)) { end_zone = i; break; + } else { + /* If balanced, clear the congested flag */ + zone_clear_flag(zone, ZONE_CONGESTED); } } if (i < 0) -- cgit v0.10.2 From 284fb68d00c56e971ed01e0b4bac5ddd4d1b74ab Mon Sep 17 00:00:00 2001 From: Alexandre Bounine Date: Thu, 25 Aug 2011 15:59:13 -0700 Subject: rapidio: fix use of non-compatible registers Replace/remove use of RIO v.1.2 registers/bits that are not forward-compatible with newer versions of RapidIO specification. RapidIO specification v.1.3 removed Write Port CSR, Doorbell CSR, Mailbox CSR and Mailbox and Doorbell bits of the PEF CAR. Use of removed (since RIO v.1.3) register bits affects users of currently available 1.3 and 2.x compliant devices who may use not so recent kernel versions. Removing checks for unsupported bits makes corresponding routines compatible with all versions of RapidIO specification. Therefore, backporting makes stable kernel versions compliant with RIO v.1.3 and later as well. 
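The compatibility point is easiest to see in rionet's capability test: instead of PEF mailbox/doorbell bits that only exist through v1.2, the driver keys off the source/destination operations CARs, which kept their meaning across spec revisions. A function-style rendering of the new macro in the diff below, for illustration only:

#include <linux/types.h>
#include <linux/rio.h>
#include <linux/rio_regs.h>

static bool node_is_rionet_capable(const struct rio_dev *rdev)
{
	/* data messaging plus doorbells is all rionet needs */
	return (rdev->src_ops & RIO_SRC_OPS_DATA_MSG) &&
	       (rdev->dst_ops & RIO_DST_OPS_DATA_MSG) &&
	       (rdev->src_ops & RIO_SRC_OPS_DOORBELL) &&
	       (rdev->dst_ops & RIO_DST_OPS_DOORBELL);
}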
Signed-off-by: Alexandre Bounine Cc: Kumar Gala Cc: Matt Porter Cc: Li Yang Cc: Thomas Moll Cc: Chul Kim Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 86ac38c..3bb1311 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -80,13 +80,13 @@ static int rionet_capable = 1; */ static struct rio_dev **rionet_active; -#define is_rionet_capable(pef, src_ops, dst_ops) \ - ((pef & RIO_PEF_INB_MBOX) && \ - (pef & RIO_PEF_INB_DOORBELL) && \ +#define is_rionet_capable(src_ops, dst_ops) \ + ((src_ops & RIO_SRC_OPS_DATA_MSG) && \ + (dst_ops & RIO_DST_OPS_DATA_MSG) && \ (src_ops & RIO_SRC_OPS_DOORBELL) && \ (dst_ops & RIO_DST_OPS_DOORBELL)) #define dev_rionet_capable(dev) \ - is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) + is_rionet_capable(dev->src_ops, dev->dst_ops) #define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) #define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) @@ -282,7 +282,6 @@ static int rionet_open(struct net_device *ndev) { int i, rc = 0; struct rionet_peer *peer, *tmp; - u32 pwdcsr; struct rionet_private *rnet = netdev_priv(ndev); if (netif_msg_ifup(rnet)) @@ -332,13 +331,8 @@ static int rionet_open(struct net_device *ndev) continue; } - /* - * If device has initialized inbound doorbells, - * send a join message - */ - rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); - if (pwdcsr & RIO_DOORBELL_AVAIL) - rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); + /* Send a join message */ + rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); } out: @@ -492,7 +486,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) { int rc = -ENODEV; - u32 lpef, lsrc_ops, ldst_ops; + u32 lsrc_ops, ldst_ops; struct rionet_peer *peer; struct net_device *ndev = NULL; @@ -515,12 +509,11 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) * on later probes */ if (!rionet_check) { - rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef); rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, &lsrc_ops); rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, &ldst_ops); - if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { + if (!is_rionet_capable(lsrc_ops, ldst_ops)) { printk(KERN_ERR "%s: local device is not network capable\n", DRV_NAME); diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index ee89358..ebe77dd 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -505,8 +505,7 @@ static struct rio_dev __devinit *rio_setup_device(struct rio_net *net, rdev->dev.dma_mask = &rdev->dma_mask; rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - if ((rdev->pef & RIO_PEF_INB_DOORBELL) && - (rdev->dst_ops & RIO_DST_OPS_DOORBELL)) + if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index 9026b30..218168a 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h @@ -36,12 +36,12 @@ #define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */ #define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */ #define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */ -#define RIO_PEF_INB_MBOX 0x00f00000 /* [II] Mailboxes */ -#define RIO_PEF_INB_MBOX0 0x00800000 /* [II] Mailbox 0 */ -#define RIO_PEF_INB_MBOX1 0x00400000 /* [II] Mailbox 1 */ -#define RIO_PEF_INB_MBOX2 0x00200000 /* [II] Mailbox 2 */ -#define 
RIO_PEF_INB_MBOX3 0x00100000 /* [II] Mailbox 3 */ -#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II] Doorbells */ +#define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */ +#define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */ +#define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */ +#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */ +#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */ +#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */ #define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */ #define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */ #define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */ @@ -102,7 +102,7 @@ #define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */ #define RIO_RT_MAX_DESTID 0x0000ffff -#define RIO_MBOX_CSR 0x40 /* [II] Mailbox CSR */ +#define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */ #define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */ #define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */ #define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */ @@ -128,8 +128,8 @@ #define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */ #define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */ -#define RIO_WRITE_PORT_CSR 0x44 /* [I] Write Port CSR */ -#define RIO_DOORBELL_CSR 0x44 /* [II] Doorbell CSR */ +#define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */ +#define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */ #define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */ #define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */ #define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */ -- cgit v0.10.2 From 15b1a8f2b990c0c1dacfad0e5ccaf05c32c52147 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 25 Aug 2011 15:59:14 -0700 Subject: drivers/video/backlight/ep93xx_bl.c: add missing include of linux/module.h ep93xx_bl.c uses interfaces from linux/module.h, so it should include that file. This patch fixes build errors: CC [M] drivers/video/backlight/ep93xx_bl.o drivers/video/backlight/ep93xx_bl.c:138: error: 'THIS_MODULE' undeclared here (not in a function) drivers/video/backlight/ep93xx_bl.c:158: error: expected declaration specifiers or '...' before string constant drivers/video/backlight/ep93xx_bl.c:158: warning: data definition has no type or storage class ... Signed-off-by: Axel Lin Acked-by: H Hartley Sweeten Cc: Ryan Mallon Cc: Richard Purdie Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c index 9f1e389..b058291 100644 --- a/drivers/video/backlight/ep93xx_bl.c +++ b/drivers/video/backlight/ep93xx_bl.c @@ -11,7 +11,7 @@ * BRIGHT, on the Cirrus EP9307, EP9312, and EP9315 processors. */ - +#include #include #include #include -- cgit v0.10.2 From 86383b55791bd97e88ef493e33ef521ee244f3d9 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 25 Aug 2011 15:59:15 -0700 Subject: leds: add missing include of linux/module.h Add missing include of linux/module.h for drivers that use interfaces from linux/module.h. This patch fixes build errors. 
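The failure mode is not leds-specific: any file that uses module_init()/module_exit() or the MODULE_*() macros has to include linux/module.h directly rather than rely on some other header dragging it in. A minimal hypothetical module showing the shape of the fix:

#include <linux/module.h>	/* module_init(), MODULE_LICENSE(), THIS_MODULE */
#include <linux/init.h>

static int __init demo_init(void)
{
	return 0;	/* nothing to do; this only demonstrates the include */
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);		/* fails to build without linux/module.h */
module_exit(demo_exit);
MODULE_LICENSE("GPL");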
Signed-off-by: Axel Lin Cc: Jonathan McDowell Acked-by: Kristoffer Ericson Cc: Magnus Damm Cc: Richard Purdie Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c index b982603..8c00937 100644 --- a/drivers/leds/leds-ams-delta.c +++ b/drivers/leds/leds-ams-delta.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c index e4ce1fd..bcfbd3a 100644 --- a/drivers/leds/leds-hp6xx.c +++ b/drivers/leds/leds-hp6xx.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include -- cgit v0.10.2 From 23751be0094012eb6b4756fa80ca54b3eb83069f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 25 Aug 2011 15:59:16 -0700 Subject: memcg: fix hierarchical oom locking Commit 79dfdaccd1d5 ("memcg: make oom_lock 0 and 1 based rather than counter") tried to oom lock the hierarchy and roll back upon encountering an already locked memcg. The code is confused when it comes to detecting a locked memcg, though, so it would fail and rollback after locking one memcg and encountering an unlocked second one. The result is that oom-locking hierarchies fails unconditionally and that every oom killer invocation simply goes to sleep on the oom waitqueue forever. The tasks practically hang forever without anyone intervening, possibly holding locks that trip up unrelated tasks, too. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0e40f02..ebd1e86 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1841,29 +1841,23 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, */ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) { - int lock_count = -1; struct mem_cgroup *iter, *failed = NULL; bool cond = true; for_each_mem_cgroup_tree_cond(iter, mem, cond) { - bool locked = iter->oom_lock; - - iter->oom_lock = true; - if (lock_count == -1) - lock_count = iter->oom_lock; - else if (lock_count != locked) { + if (iter->oom_lock) { /* * this subtree of our hierarchy is already locked * so we cannot give a lock. */ - lock_count = 0; failed = iter; cond = false; - } + } else + iter->oom_lock = true; } if (!failed) - goto done; + return true; /* * OK, we failed to lock the whole subtree so we have to clean up @@ -1877,8 +1871,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) } iter->oom_lock = false; } -done: - return lock_count; + return false; } /* -- cgit v0.10.2 From cc7993f6439b49909a8792660c4d0741fec9d584 Mon Sep 17 00:00:00 2001 From: Dilan Lee Date: Thu, 25 Aug 2011 15:59:17 -0700 Subject: backlight: add a callback 'notify_after' for backlight control We need a callback to do some things after pwm_enable, pwm_disable and pwm_config. 
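A sketch of the intended use, with made-up board names; the callback signature and the notify_after field follow the platform_pwm_backlight_data change in the patch below. The point of the new hook is ordering: it runs only after pwm_config()/pwm_enable()/pwm_disable() have completed, so it is a safe place to, say, flip a panel-enable line:

#include <linux/gpio.h>
#include <linux/pwm_backlight.h>

#define MYBOARD_GPIO_BL_EN	42	/* hypothetical panel-enable GPIO */

static void myboard_bl_notify_after(struct device *dev, int brightness)
{
	/* runs after the PWM state has actually changed */
	gpio_set_value(MYBOARD_GPIO_BL_EN, brightness != 0);
}

static struct platform_pwm_backlight_data myboard_backlight_data = {
	.pwm_id		= 0,
	.max_brightness	= 255,
	.dft_brightness	= 200,
	.pwm_period_ns	= 1000000,	/* 1 kHz */
	.notify_after	= myboard_bl_notify_after,
};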
Signed-off-by: Dilan Lee Reviewed-by: Robert Morell Reviewed-by: Arun Murthy Cc: Richard Purdie Cc: Paul Mundt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index b8f38ec..8b5b2a4 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -28,6 +28,8 @@ struct pwm_bl_data { unsigned int lth_brightness; int (*notify)(struct device *, int brightness); + void (*notify_after)(struct device *, + int brightness); int (*check_fb)(struct device *, struct fb_info *); }; @@ -55,6 +57,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl) pwm_config(pb->pwm, brightness, pb->period); pwm_enable(pb->pwm); } + + if (pb->notify_after) + pb->notify_after(pb->dev, brightness); + return 0; } @@ -105,6 +111,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->period = data->pwm_period_ns; pb->notify = data->notify; + pb->notify_after = data->notify_after; pb->check_fb = data->check_fb; pb->lth_brightness = data->lth_brightness * (data->pwm_period_ns / data->max_brightness); @@ -172,6 +179,8 @@ static int pwm_backlight_suspend(struct platform_device *pdev, pb->notify(pb->dev, 0); pwm_config(pb->pwm, 0, pb->period); pwm_disable(pb->pwm); + if (pb->notify_after) + pb->notify_after(pb->dev, 0); return 0; } diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 5e3e25a..63d2df4 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -14,6 +14,7 @@ struct platform_pwm_backlight_data { unsigned int pwm_period_ns; int (*init)(struct device *dev); int (*notify)(struct device *dev, int brightness); + void (*notify_after)(struct device *dev, int brightness); void (*exit)(struct device *dev); int (*check_fb)(struct device *dev, struct fb_info *info); }; -- cgit v0.10.2 From c53252b780e26c73c6a4e40bc14179447504cccd Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 25 Aug 2011 15:59:18 -0700 Subject: backlight: fix module alias prefix for adp8870_bl This is an i2c driver, not a platform driver, thus use "i2c" prefix for the module alias. Signed-off-by: Axel Lin Acked-by: Michael Hennerich Cc: Richard Purdie Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 05a8832..d06886a 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c @@ -1009,4 +1009,4 @@ module_exit(adp8870_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Michael Hennerich "); MODULE_DESCRIPTION("ADP8870 Backlight driver"); -MODULE_ALIAS("platform:adp8870-backlight"); +MODULE_ALIAS("i2c:adp8870-backlight"); -- cgit v0.10.2 From b89d5f17d4b02ae9f3a691c2cb260e1929c6261b Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 25 Aug 2011 15:59:19 -0700 Subject: drivers/misc/fsa9480.c: fix a leak of the IRQ during init failure Make sure we are passing the same cookie in all calls to request_threaded_irq() and free_irq(). 
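The rule being enforced is general: free_irq() releases the handler whose dev_id matches, so the cookie must be identical in the request and free calls; free_irq(irq, NULL) simply fails to match a handler registered with a non-NULL dev_id. A minimal sketch, independent of this driver, with hypothetical names:

#include <linux/interrupt.h>

struct my_ctx {
	int serviced;	/* hypothetical per-device state */
};

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_ctx *ctx = dev_id;

	ctx->serviced++;	/* stand-in for real device servicing */
	return IRQ_HANDLED;
}

static int my_probe(int irq, struct my_ctx *ctx)
{
	/* register with the per-device context as the dev_id cookie */
	return request_threaded_irq(irq, NULL, my_thread_fn,
				    IRQF_ONESHOT, "my-dev", ctx);
}

static void my_remove(int irq, struct my_ctx *ctx)
{
	/* free with the very same cookie, never NULL */
	free_irq(irq, ctx);
}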
Signed-off-by: Axel Lin Cc: Donggeun Kim Cc: Minkyu Kang Cc: Kyungmin Park Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/misc/fsa9480.c b/drivers/misc/fsa9480.c index 5325a7e..27dc0d2 100644 --- a/drivers/misc/fsa9480.c +++ b/drivers/misc/fsa9480.c @@ -455,7 +455,7 @@ static int __devinit fsa9480_probe(struct i2c_client *client, fail2: if (client->irq) - free_irq(client->irq, NULL); + free_irq(client->irq, usbsw); fail1: i2c_set_clientdata(client, NULL); kfree(usbsw); @@ -466,7 +466,7 @@ static int __devexit fsa9480_remove(struct i2c_client *client) { struct fsa9480_usbsw *usbsw = i2c_get_clientdata(client); if (client->irq) - free_irq(client->irq, NULL); + free_irq(client->irq, usbsw); i2c_set_clientdata(client, NULL); sysfs_remove_group(&client->dev.kobj, &fsa9480_group); -- cgit v0.10.2 From 37b7bf67c36d3a2b426c0cb2787d948949574103 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 25 Aug 2011 15:59:20 -0700 Subject: drivers/misc/ab8500-pwm.c: fix modalias Since 43cc71eed12 ("platform: prefix MODALIAS with "platform:""), the platform modalias is prefixed with "platform:". This patch changes the MODULE_ALIAS to "platform:ab8500-pwm". Signed-off-by: Axel Lin Acked-by: Arun Murthy Cc: Linus Walleij Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c index 54e3d05..3590315 100644 --- a/drivers/misc/ab8500-pwm.c +++ b/drivers/misc/ab8500-pwm.c @@ -164,5 +164,5 @@ subsys_initcall(ab8500_pwm_init); module_exit(ab8500_pwm_exit); MODULE_AUTHOR("Arun MURTHY "); MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); -MODULE_ALIAS("AB8500 PWM driver"); +MODULE_ALIAS("platform:ab8500-pwm"); MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 02016bc0381c4af9153b4e4008edfadae2bd6abc Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Thu, 25 Aug 2011 15:59:20 -0700 Subject: cris: add arch/cris/include/asm/serial.h Fix the following build errors: drivers/tty/serial/8250_early.c:160: error: 'BASE_BAUD' undeclared (first use in this function): 1 errors in 1 logs drivers/tty/serial/8250_early.c:37:24: error: asm/serial.h: No such file or directory: 1 errors in 1 logs I am not sure if (1843200 / 16) is suitable for cris, but most other arch's define it as this value. Signed-off-by: WANG Cong Cc: Mikael Starvik Cc: Jesper Nilsson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/cris/include/asm/serial.h b/arch/cris/include/asm/serial.h new file mode 100644 index 0000000..af7535a --- /dev/null +++ b/arch/cris/include/asm/serial.h @@ -0,0 +1,9 @@ +#ifndef _ASM_SERIAL_H +#define _ASM_SERIAL_H + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + */ +#define BASE_BAUD (1843200 / 16) + +#endif /* _ASM_SERIAL_H */ -- cgit v0.10.2 From 1424e21f66f4c51c31ba6ac188df46b43f51556b Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Thu, 25 Aug 2011 15:59:21 -0700 Subject: drivers/leds/leds-bd2802.c: bd2802_unregister_led_classdev() should unregister all registered leds bd2802_unregister_led_classdev() should unregister all registered instances of led_classdev class that had registered by bd2802_register_led_classdev(). 
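The wider idiom the fix restores is register/unregister symmetry: everything the _register helper created is torn down by the matching _unregister helper, in reverse order of creation. A hypothetical two-LED skeleton of the same shape:

#include <linux/leds.h>
#include <linux/workqueue.h>

struct my_led {
	struct work_struct work;
	struct led_classdev cdev_r, cdev_g;
};

static int my_register_leds(struct device *dev, struct my_led *led)
{
	int ret;

	ret = led_classdev_register(dev, &led->cdev_r);
	if (ret)
		return ret;

	ret = led_classdev_register(dev, &led->cdev_g);
	if (ret)
		led_classdev_unregister(&led->cdev_r);	/* unwind on failure */
	return ret;
}

static void my_unregister_leds(struct my_led *led)
{
	cancel_work_sync(&led->work);
	led_classdev_unregister(&led->cdev_g);	/* reverse of creation order */
	led_classdev_unregister(&led->cdev_r);
}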
Signed-off-by: Axel Lin Acked-by: Kim Kyuwon Cc: Richard Purdie Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index 3ebe382..ea21855 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c @@ -662,6 +662,11 @@ failed_unregister_led1_R: static void bd2802_unregister_led_classdev(struct bd2802_led *led) { cancel_work_sync(&led->work); + led_classdev_unregister(&led->cdev_led2b); + led_classdev_unregister(&led->cdev_led2g); + led_classdev_unregister(&led->cdev_led2r); + led_classdev_unregister(&led->cdev_led1b); + led_classdev_unregister(&led->cdev_led1g); led_classdev_unregister(&led->cdev_led1r); } -- cgit v0.10.2 From 4e8896cde182b4eab6f2d0af9b6eef87720fae0d Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Thu, 25 Aug 2011 15:59:22 -0700 Subject: drivers/rtc/rtc-s3c.c: correct debug messages RTC-S3C used to print out debug messages incorrectly. This patch corrects incorrect outputs. (undecoded bcd numbers, incorrectly decoded register values) This patch affects the pr-debug messages only. Signed-off-by: MyungJoo Ham Signed-off-by: Kyungmin Park Acked-by: Kukjin Kim Cc: Alessandro Zummo Cc: Changhwan Youn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 9329dbb..067207a 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -152,10 +152,6 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) goto retry_get_time; } - pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", - 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, - rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); - rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec); rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min); rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour); @@ -164,6 +160,11 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year); rtc_tm->tm_year += 100; + + pr_debug("read time %04d.%02d.%02d %02d:%02d:%02d\n", + 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, + rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); + rtc_tm->tm_mon -= 1; clk_disable(rtc_clk); @@ -269,10 +270,9 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) clk_enable(rtc_clk); pr_debug("s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", alrm->enabled, - 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, + 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); - alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; writeb(0x00, base + S3C2410_RTCALM); -- cgit v0.10.2 From 62d1760180c84cba68cc83696fa0bde0593007bd Mon Sep 17 00:00:00 2001 From: MyungJoo Ham Date: Thu, 25 Aug 2011 15:59:24 -0700 Subject: drivers/rtc/rtc-s3c.c: allow multiple open / allow no-ioctl-open'ed rtc to have irq. The previous rtc-s3c had two issues related with its IRQ. 1. Users cannot open rtc multiple times because an open operation calls request_irq on the same IRQ. (e.g., two user processes wants to open and read RTC time from rtc-s3c at the same time) 2. If alarm is set and no one has the rtc opened with filesystem (either the alarm is set by kernel/boot-loader or user set an alarm and closed rtc dev file), the pending bit is not cleared and no further interrupt is invoked. When the alarm is used by the system itself such as a resume from suspend-to-RAM or other Low-power modes/idle, this is a critical issue. 
This patch mitigates these issues by calling request_irq at probe and free_irq at remove. Signed-off-by: MyungJoo Ham Signed-off-by: Kyungmin Park Acked-by: Kukjin Kim Cc: Alessandro Zummo Cc: Changhwan Youn Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 067207a..4e7c04e 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -319,49 +319,7 @@ static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) return 0; } -static int s3c_rtc_open(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - int ret; - - ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, - IRQF_DISABLED, "s3c2410-rtc alarm", rtc_dev); - - if (ret) { - dev_err(dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); - return ret; - } - - ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, - IRQF_DISABLED, "s3c2410-rtc tick", rtc_dev); - - if (ret) { - dev_err(dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); - goto tick_err; - } - - return ret; - - tick_err: - free_irq(s3c_rtc_alarmno, rtc_dev); - return ret; -} - -static void s3c_rtc_release(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct rtc_device *rtc_dev = platform_get_drvdata(pdev); - - /* do not clear AIE here, it may be needed for wake */ - - free_irq(s3c_rtc_alarmno, rtc_dev); - free_irq(s3c_rtc_tickno, rtc_dev); -} - static const struct rtc_class_ops s3c_rtcops = { - .open = s3c_rtc_open, - .release = s3c_rtc_release, .read_time = s3c_rtc_gettime, .set_time = s3c_rtc_settime, .read_alarm = s3c_rtc_getalarm, @@ -425,6 +383,9 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) { struct rtc_device *rtc = platform_get_drvdata(dev); + free_irq(s3c_rtc_alarmno, rtc); + free_irq(s3c_rtc_tickno, rtc); + platform_set_drvdata(dev, NULL); rtc_device_unregister(rtc); @@ -548,10 +509,32 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) s3c_rtc_setfreq(&pdev->dev, 1); + ret = request_irq(s3c_rtc_alarmno, s3c_rtc_alarmirq, + IRQF_DISABLED, "s3c2410-rtc alarm", rtc); + if (ret) { + dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret); + goto err_alarm_irq; + } + + ret = request_irq(s3c_rtc_tickno, s3c_rtc_tickirq, + IRQF_DISABLED, "s3c2410-rtc tick", rtc); + if (ret) { + dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret); + free_irq(s3c_rtc_alarmno, rtc); + goto err_tick_irq; + } + clk_disable(rtc_clk); return 0; + err_tick_irq: + free_irq(s3c_rtc_alarmno, rtc); + + err_alarm_irq: + platform_set_drvdata(pdev, NULL); + rtc_device_unregister(rtc); + err_nortc: s3c_rtc_enable(pdev, 0); clk_disable(rtc_clk); -- cgit v0.10.2 From 671ee7f0ce62e4b991b47fcf1c161c3f710dabbc Mon Sep 17 00:00:00 2001 From: Liu Gang-B34182 Date: Thu, 25 Aug 2011 15:59:25 -0700 Subject: arch/powerpc/sysdev/fsl_rio.c: correct IECSR register clear value This bug causes the IECSR register clear failure. In this case, the RETE (retry error threshold exceeded) interrupt will be generated and cannot be cleared. So the related ISR may be called persistently. The RETE bit in IECSR is cleared by writing a 1 to it. 
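The gotcha is the register's write-1-to-clear semantics: storing 0, as the old code did, acknowledges nothing, so the latched RETE bit keeps the interrupt asserted. A minimal illustration of the idiom (the accessor matches the patch below; the bit name comes from the commit text):

#include <linux/types.h>
#include <asm/io.h>

#define IECSR_RETE	0x80000000	/* retry error threshold exceeded */

static void ack_retry_error(u32 __iomem *iecsr_reg)
{
	/*
	 * Write-1-to-clear: write the bit itself back to acknowledge it.
	 * out_be32(iecsr_reg, 0) would be a no-op and RETE would stay set.
	 */
	out_be32(iecsr_reg, IECSR_RETE);
}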
Signed-off-by: Liu Gang Cc: Benjamin Herrenschmidt Cc: Kumar Gala Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 2de8551..c65f75a 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -54,6 +54,7 @@ #define ODSR_CLEAR 0x1c00 #define LTLEECSR_ENABLE_ALL 0xFFC000FC #define ESCSR_CLEAR 0x07120204 +#define IECSR_CLEAR 0x80000000 #define RIO_PORT1_EDCSR 0x0640 #define RIO_PORT2_EDCSR 0x0680 @@ -1089,11 +1090,11 @@ static void port_error_handler(struct rio_mport *port, int offset) if (offset == 0) { out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); - out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0); + out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); } else { out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); - out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0); + out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR); out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); } } -- cgit v0.10.2